-rw-r--r--Documentation/DocBook/device-drivers.tmpl5
-rw-r--r--Documentation/DocBook/deviceiobook.tmpl2
-rw-r--r--Documentation/DocBook/media/dvb/dvbproperty.xml12
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml18
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml23
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-frequency.xml7
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-input.xml4
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-output.xml5
-rw-r--r--Documentation/acpi/apei/einj.txt55
-rw-r--r--Documentation/devicetree/bindings/i2c/omap-i2c.txt30
-rw-r--r--Documentation/feature-removal-schedule.txt11
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/power/basic-pm-debugging.txt2
-rw-r--r--Documentation/power/freezing-of-tasks.txt8
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas10
-rw-r--r--Documentation/scsi/LICENSE.qla4xxx23
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py60
-rw-r--r--Documentation/video4linux/v4l2-controls.txt21
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile4
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig (renamed from arch/arm/configs/mx5_defconfig)61
-rw-r--r--arch/arm/configs/mx3_defconfig144
-rw-r--r--arch/arm/mach-imx/Kconfig239
-rw-r--r--arch/arm/mach-imx/Makefile21
-rw-r--r--arch/arm/mach-imx/Makefile.boot12
-rw-r--r--arch/arm/mach-imx/clock-mx51-mx53.c (renamed from arch/arm/mach-mx5/clock-mx51-mx53.c)2
-rw-r--r--arch/arm/mach-imx/cpu-imx5.c (renamed from arch/arm/mach-mx5/cpu.c)0
-rw-r--r--arch/arm/mach-imx/cpu_op-mx51.c (renamed from arch/arm/mach-mx5/cpu_op-mx51.c)0
-rw-r--r--arch/arm/mach-imx/cpu_op-mx51.h (renamed from arch/arm/mach-mx5/cpu_op-mx51.h)0
-rw-r--r--arch/arm/mach-imx/crm-regs-imx5.h (renamed from arch/arm/mach-mx5/crm_regs.h)0
-rw-r--r--arch/arm/mach-imx/devices-imx50.h (renamed from arch/arm/mach-mx5/devices-imx50.h)0
-rw-r--r--arch/arm/mach-imx/devices-imx51.h (renamed from arch/arm/mach-mx5/devices-imx51.h)0
-rw-r--r--arch/arm/mach-imx/devices-imx53.h (renamed from arch/arm/mach-mx5/devices-imx53.h)0
-rw-r--r--arch/arm/mach-imx/efika.h (renamed from arch/arm/mach-mx5/efika.h)0
-rw-r--r--arch/arm/mach-imx/ehci-imx5.c (renamed from arch/arm/mach-mx5/ehci.c)0
-rw-r--r--arch/arm/mach-imx/eukrea_mbimx51-baseboard.c (renamed from arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c)0
-rw-r--r--arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c (renamed from arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c)0
-rw-r--r--arch/arm/mach-imx/imx51-dt.c (renamed from arch/arm/mach-mx5/imx51-dt.c)0
-rw-r--r--arch/arm/mach-imx/imx53-dt.c (renamed from arch/arm/mach-mx5/imx53-dt.c)0
-rw-r--r--arch/arm/mach-imx/mach-cpuimx51.c (renamed from arch/arm/mach-mx5/board-cpuimx51.c)0
-rw-r--r--arch/arm/mach-imx/mach-cpuimx51sd.c (renamed from arch/arm/mach-mx5/board-cpuimx51sd.c)0
-rw-r--r--arch/arm/mach-imx/mach-mx50_rdp.c (renamed from arch/arm/mach-mx5/board-mx50_rdp.c)0
-rw-r--r--arch/arm/mach-imx/mach-mx51_3ds.c (renamed from arch/arm/mach-mx5/board-mx51_3ds.c)0
-rw-r--r--arch/arm/mach-imx/mach-mx51_babbage.c (renamed from arch/arm/mach-mx5/board-mx51_babbage.c)0
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikamx.c (renamed from arch/arm/mach-mx5/board-mx51_efikamx.c)0
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikasb.c (renamed from arch/arm/mach-mx5/board-mx51_efikasb.c)0
-rw-r--r--arch/arm/mach-imx/mach-mx53_ard.c (renamed from arch/arm/mach-mx5/board-mx53_ard.c)1
-rw-r--r--arch/arm/mach-imx/mach-mx53_evk.c (renamed from arch/arm/mach-mx5/board-mx53_evk.c)1
-rw-r--r--arch/arm/mach-imx/mach-mx53_loco.c (renamed from arch/arm/mach-mx5/board-mx53_loco.c)1
-rw-r--r--arch/arm/mach-imx/mach-mx53_smd.c (renamed from arch/arm/mach-mx5/board-mx53_smd.c)1
-rw-r--r--arch/arm/mach-imx/mm-imx5.c (renamed from arch/arm/mach-mx5/mm.c)0
-rw-r--r--arch/arm/mach-imx/mx51_efika.c (renamed from arch/arm/mach-mx5/mx51_efika.c)0
-rw-r--r--arch/arm/mach-imx/pm-imx5.c (renamed from arch/arm/mach-mx5/system.c)89
-rw-r--r--arch/arm/mach-mx5/Kconfig244
-rw-r--r--arch/arm/mach-mx5/Makefile26
-rw-r--r--arch/arm/mach-mx5/Makefile.boot9
-rw-r--r--arch/arm/mach-mx5/pm-imx5.c83
-rw-r--r--arch/arm/plat-mxc/Kconfig15
-rw-r--r--arch/ia64/kernel/acpi.c10
-rw-r--r--arch/s390/include/asm/kexec.h18
-rw-r--r--arch/score/kernel/entry.S2
-rw-r--r--arch/x86/.gitignore1
-rw-r--r--arch/x86/Kconfig20
-rw-r--r--arch/x86/include/asm/unistd.h1
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h107
-rw-r--r--arch/x86/kernel/e820.c4
-rw-r--r--arch/x86/kernel/tsc.c14
-rw-r--r--arch/x86/lib/x86-opcode-map.txt8
-rw-r--r--arch/x86/mm/srat.c4
-rw-r--r--arch/x86/platform/uv/tlb_uv.c388
-rw-r--r--arch/x86/um/shared/sysdep/ptrace.h5
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpica/Makefile158
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconfig.h9
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h3
-rw-r--r--drivers/acpi/acpica/acglobal.h17
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h4
-rw-r--r--drivers/acpi/acpica/aclocal.h26
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h8
-rw-r--r--drivers/acpi/acpica/acopcode.h6
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h41
-rw-r--r--drivers/acpi/acpica/acresrc.h115
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h21
-rw-r--r--drivers/acpi/acpica/amlcode.h29
-rw-r--r--drivers/acpi/acpica/amlresrc.h138
-rw-r--r--drivers/acpi/acpica/dsargs.c18
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c83
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c14
-rw-r--r--drivers/acpi/acpica/evglock.c8
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c31
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c8
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c31
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c9
-rw-r--r--drivers/acpi/acpica/exfield.c30
-rw-r--r--drivers/acpi/acpica/exfldio.c38
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c27
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c27
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c4
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c31
-rw-r--r--drivers/acpi/acpica/nsrepair.c3
-rw-r--r--drivers/acpi/acpica/nsrepair2.c7
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c143
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c15
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c8
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c89
-rw-r--r--drivers/acpi/acpica/rscreate.c69
-rw-r--r--drivers/acpi/acpica/rsdump.c196
-rw-r--r--drivers/acpi/acpica/rsinfo.c58
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c33
-rw-r--r--drivers/acpi/acpica/rslist.c77
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c269
-rw-r--r--drivers/acpi/acpica/rsserial.c441
-rw-r--r--drivers/acpi/acpica/rsutils.c56
-rw-r--r--drivers/acpi/acpica/rsxface.c52
-rw-r--r--drivers/acpi/acpica/tbfadt.c41
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c9
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c294
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c6
-rw-r--r--drivers/acpi/acpica/utdelete.c15
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c8
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c3
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c11
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c278
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utxface.c40
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c187
-rw-r--r--drivers/acpi/apei/apei-base.c150
-rw-r--r--drivers/acpi/apei/apei-internal.h6
-rw-r--r--drivers/acpi/apei/einj.c290
-rw-r--r--drivers/acpi/apei/erst.c5
-rw-r--r--drivers/acpi/apei/ghes.c102
-rw-r--r--drivers/acpi/apei/hest.c5
-rw-r--r--drivers/acpi/atomicio.c77
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/nvs.c53
-rw-r--r--drivers/acpi/osl.c242
-rw-r--r--drivers/acpi/processor_core.c26
-rw-r--r--drivers/acpi/processor_driver.c20
-rw-r--r--drivers/base/bus.c9
-rw-r--r--drivers/base/firmware_class.c3
-rw-r--r--drivers/block/Kconfig11
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/nvme.c1739
-rw-r--r--drivers/char/tpm/tpm.c9
-rw-r--r--drivers/char/tpm/tpm.h3
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-ml-ioh.c8
-rw-r--r--drivers/gpio/gpio-pch.c4
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpu/drm/gma500/gtt.c5
-rw-r--r--drivers/i2c/busses/Kconfig14
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c19
-rw-r--r--drivers/i2c/busses/i2c-omap.c110
-rw-r--r--drivers/idle/intel_idle.c96
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/ulp/srpt/Kconfig12
-rw-r--r--drivers/infiniband/ulp/srpt/Makefile2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_dm_mad.h139
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4073
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h444
-rw-r--r--drivers/leds/Kconfig7
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-ot200.c171
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c27
-rw-r--r--drivers/media/common/tuners/xc4000.c86
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c41
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c20
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c1
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c150
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c10
-rw-r--r--drivers/media/dvb/frontends/ds3000.c2
-rw-r--r--drivers/media/dvb/frontends/mb86a20s.c8
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.c1
-rw-r--r--drivers/media/video/as3645a.c1
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c41
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c4
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c5
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c7
-rw-r--r--drivers/media/video/cx88/cx88-cards.c24
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c3
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c118
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c22
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-yuv.c22
-rw-r--r--drivers/media/video/omap/omap_vout.c7
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c239
-rw-r--r--drivers/media/video/pwc/pwc-dec1.c16
-rw-r--r--drivers/media/video/pwc/pwc-dec1.h6
-rw-r--r--drivers/media/video/pwc/pwc-dec23.c41
-rw-r--r--drivers/media/video/pwc/pwc-dec23.h9
-rw-r--r--drivers/media/video/pwc/pwc-if.c175
-rw-r--r--drivers/media/video/pwc/pwc-misc.c1
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c90
-rw-r--r--drivers/media/video/pwc/pwc.h14
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c7
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c6
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c1
-rw-r--r--drivers/media/video/s5p-g2d/g2d.c1
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.c7
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c3
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c2
-rw-r--r--drivers/media/video/saa7164/saa7164-cards.c4
-rw-r--r--drivers/media/video/tlg2300/pd-main.c4
-rw-r--r--drivers/media/video/v4l2-ctrls.c54
-rw-r--r--drivers/media/video/v4l2-ioctl.c8
-rw-r--r--drivers/media/video/zoran/zoran_driver.c1
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h7
-rw-r--r--drivers/scsi/bfa/bfa_fc.h155
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c416
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h7
-rw-r--r--drivers/scsi/bfa/bfa_svc.h5
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_attr.c2
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c27
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bfa/bfad_im.c56
-rw-r--r--drivers/scsi/bfa/bfad_im.h27
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c46
-rw-r--r--drivers/scsi/fcoe/fcoe.h4
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/isci/firmware/Makefile19
-rw-r--r--drivers/scsi/isci/firmware/README36
-rw-r--r--drivers/scsi/isci/firmware/create_fw.c99
-rw-r--r--drivers/scsi/isci/firmware/create_fw.h77
-rw-r--r--drivers/scsi/isci/host.c340
-rw-r--r--drivers/scsi/isci/host.h27
-rw-r--r--drivers/scsi/isci/init.c25
-rw-r--r--drivers/scsi/isci/isci.h1
-rw-r--r--drivers/scsi/isci/phy.c172
-rw-r--r--drivers/scsi/isci/port.c104
-rw-r--r--drivers/scsi/isci/port.h10
-rw-r--r--drivers/scsi/isci/port_config.c35
-rw-r--r--drivers/scsi/isci/probe_roms.c2
-rw-r--r--drivers/scsi/isci/probe_roms.h89
-rw-r--r--drivers/scsi/isci/remote_device.c10
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/isci/task.h7
-rw-r--r--drivers/scsi/libfc/fc_disc.c6
-rw-r--r--drivers/scsi/libfc/fc_elsct.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c4
-rw-r--r--drivers/scsi/libfc/fc_lport.c5
-rw-r--r--drivers/scsi/libfc/fc_rport.c10
-rw-r--r--drivers/scsi/megaraid.c13
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c145
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c7
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c511
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c7
-rw-r--r--drivers/scsi/scsi_transport_fc.c3
-rw-r--r--drivers/scsi/sg.c25
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c19
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c36
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c23
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c8
-rw-r--r--drivers/target/loopback/tcm_loop.c33
-rw-r--r--drivers/target/loopback/tcm_loop.h11
-rw-r--r--drivers/target/target_core_alua.c7
-rw-r--r--drivers/target/target_core_cdb.c39
-rw-r--r--drivers/target/target_core_cdb.h14
-rw-r--r--drivers/target/target_core_configfs.c34
-rw-r--r--drivers/target/target_core_device.c15
-rw-r--r--drivers/target/target_core_fabric_configfs.c8
-rw-r--r--drivers/target/target_core_fabric_lib.c13
-rw-r--r--drivers/target/target_core_file.c15
-rw-r--r--drivers/target/target_core_hba.c7
-rw-r--r--drivers/target/target_core_hba.h7
-rw-r--r--drivers/target/target_core_iblock.c7
-rw-r--r--drivers/target/target_core_internal.h123
-rw-r--r--drivers/target/target_core_pr.c24
-rw-r--r--drivers/target/target_core_pr.h2
-rw-r--r--drivers/target/target_core_pscsi.c20
-rw-r--r--drivers/target/target_core_rd.c6
-rw-r--r--drivers/target/target_core_stat.c9
-rw-r--r--drivers/target/target_core_stat.h8
-rw-r--r--drivers/target/target_core_tmr.c36
-rw-r--r--drivers/target/target_core_tpg.c9
-rw-r--r--drivers/target/target_core_transport.c343
-rw-r--r--drivers/target/target_core_ua.c6
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c57
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c6
-rw-r--r--drivers/target/tcm_fc/tfc_io.c5
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c5
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/l4f00242t03.c2
-rw-r--r--drivers/xen/biomerge.c2
-rw-r--r--firmware/Makefile1
-rw-r--r--firmware/isci/isci_firmware.bin.ihex16
-rw-r--r--fs/cifs/Kconfig3
-rw-r--r--fs/cifs/cifs_debug.c11
-rw-r--r--fs/cifs/cifs_spnego.c10
-rw-r--r--fs/cifs/cifs_unicode.c41
-rw-r--r--fs/cifs/cifs_unicode.h20
-rw-r--r--fs/cifs/cifsacl.c2
-rw-r--r--fs/cifs/cifsencrypt.c21
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/cifssmb.c162
-rw-r--r--fs/cifs/connect.c305
-rw-r--r--fs/cifs/readdir.c9
-rw-r--r--fs/cifs/sess.c34
-rw-r--r--fs/cifs/smbencrypt.c2
-rw-r--r--fs/debugfs/file.c2
-rw-r--r--fs/ext2/ioctl.c22
-rw-r--r--fs/inode.c3
-rw-r--r--fs/jbd/checkpoint.c27
-rw-r--r--fs/jbd/recovery.c4
-rw-r--r--fs/proc/stat.c2
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/qnx4/inode.c62
-rw-r--r--fs/quota/dquot.c8
-rw-r--r--fs/super.c2
-rw-r--r--include/acpi/acnames.h1
-rw-r--r--include/acpi/acpi_numa.h1
-rw-r--r--include/acpi/acpiosxf.h7
-rw-r--r--include/acpi/acpixf.h32
-rw-r--r--include/acpi/acrestyp.h207
-rw-r--r--include/acpi/actbl.h23
-rw-r--r--include/acpi/actbl1.h57
-rw-r--r--include/acpi/actbl3.h552
-rw-r--r--include/acpi/actypes.h12
-rw-r--r--include/keys/user-type.h3
-rw-r--r--include/linux/acpi.h20
-rw-r--r--include/linux/acpi_io.h3
-rw-r--r--include/linux/cpuidle.h7
-rw-r--r--include/linux/device.h15
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/key.h2
-rw-r--r--include/linux/migrate.h14
-rw-r--r--include/linux/migrate_mode.h16
-rw-r--r--include/linux/nvme.h434
-rw-r--r--include/linux/quota.h6
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/shmem_fs.h1
-rw-r--r--include/linux/suspend.h19
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/usb.h1
-rw-r--r--include/media/tuner.h3
-rw-r--r--include/net/cfg80211.h6
-rw-r--r--include/scsi/libfc.h2
-rw-r--r--include/target/target_core_backend.h65
-rw-r--r--include/target/target_core_base.h169
-rw-r--r--include/target/target_core_device.h63
-rw-r--r--include/target/target_core_fabric.h (renamed from include/target/target_core_fabric_ops.h)94
-rw-r--r--include/target/target_core_fabric_lib.h28
-rw-r--r--include/target/target_core_tmr.h35
-rw-r--r--include/target/target_core_tpg.h35
-rw-r--r--include/target/target_core_transport.h287
-rw-r--r--ipc/mqueue.c3
-rw-r--r--ipc/shm.c37
-rw-r--r--kernel/auditsc.c5
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/power/snapshot.c3
-rw-r--r--kernel/sched/cpupri.c3
-rw-r--r--kernel/tracepoint.c7
-rw-r--r--lib/mpi/mpicoder.c2
-rw-r--r--mm/hugetlb.c9
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory.c37
-rw-r--r--mm/page_alloc.c20
-rw-r--r--mm/shmem.c53
-rw-r--r--mm/vmscan.c124
-rwxr-xr-xscripts/kernel-doc3
-rw-r--r--security/integrity/ima/ima_policy.c3
-rw-r--r--security/keys/internal.h1
-rw-r--r--security/keys/key.c1
-rw-r--r--security/keys/user_defined.c43
-rw-r--r--sound/pci/hda/alc880_quirks.c17
-rw-r--r--sound/pci/hda/alc882_quirks.c15
-rw-r--r--sound/pci/hda/hda_intel.c6
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_realtek.c47
-rw-r--r--sound/pci/hda/patch_sigmatel.c10
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/wm8993.c6
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c12
-rw-r--r--sound/soc/soc-core.c4
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.c2
506 files changed, 16509 insertions, 5178 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index b638e50cf8f6..2f7fd4360848 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -50,7 +50,9 @@
 
 <sect1><title>Delaying, scheduling, and timer routines</title>
 !Iinclude/linux/sched.h
-!Ekernel/sched.c
+!Ekernel/sched/core.c
+!Ikernel/sched/cpupri.c
+!Ikernel/sched/fair.c
 !Iinclude/linux/completion.h
 !Ekernel/timer.c
 </sect1>
@@ -216,7 +218,6 @@ X!Isound/sound_firmware.c
 
 <chapter id="uart16x50">
 <title>16x50 UART Driver</title>
-!Iinclude/linux/serial_core.h
 !Edrivers/tty/serial/serial_core.c
 !Edrivers/tty/serial/8250.c
 </chapter>
diff --git a/Documentation/DocBook/deviceiobook.tmpl b/Documentation/DocBook/deviceiobook.tmpl
index c1ed6a49e598..54199a0dcf9a 100644
--- a/Documentation/DocBook/deviceiobook.tmpl
+++ b/Documentation/DocBook/deviceiobook.tmpl
@@ -317,7 +317,7 @@ CPU B: spin_unlock_irqrestore(&amp;dev_lock, flags)
 <chapter id="pubfunctions">
 <title>Public Functions Provided</title>
 !Iarch/x86/include/asm/io.h
-!Elib/iomap.c
+!Elib/pci_iomap.c
 </chapter>
 
 </book>
diff --git a/Documentation/DocBook/media/dvb/dvbproperty.xml b/Documentation/DocBook/media/dvb/dvbproperty.xml
index ffee1fbbc001..c7a4ca517859 100644
--- a/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/media/dvb/dvbproperty.xml
@@ -163,14 +163,16 @@ get/set up to 64 properties. The actual meaning of each property is described on
 <section id="DTV-FREQUENCY">
 <title><constant>DTV_FREQUENCY</constant></title>
 
-<para>Central frequency of the channel, in HZ.</para>
+<para>Central frequency of the channel.</para>
 
 <para>Notes:</para>
-<para>1)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
+<para>1)For satellital delivery systems, it is measured in kHz.
+For the other ones, it is measured in Hz.</para>
+<para>2)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
 E.g. a valid frequncy could be 474143 kHz. The stepping is bound to the bandwidth of
 the channel which is 6MHz.</para>
 
-<para>2)As in ISDB-Tsb the channel consists of only one or three segments the
+<para>3)As in ISDB-Tsb the channel consists of only one or three segments the
 frequency step is 429kHz, 3*429 respectively. As for ISDB-T the
 central frequency of the channel is expected.</para>
 </section>
@@ -735,14 +737,10 @@ typedef enum fe_hierarchy {
 <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
-<listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
-<listitem><para><link linkend="DTV-CODE-RATE-HP"><constant>DTV_CODE_RATE_HP</constant></link></para></listitem>
-<listitem><para><link linkend="DTV-CODE-RATE-LP"><constant>DTV_CODE_RATE_LP</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
-<listitem><para><link linkend="DTV-HIERARCHY"><constant>DTV_HIERARCHY</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-ISDBT-LAYER-ENABLED"><constant>DTV_ISDBT_LAYER_ENABLED</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-ISDBT-PARTIAL-RECEPTION"><constant>DTV_ISDBT_PARTIAL_RECEPTION</constant></link></para></listitem>
 <listitem><para><link linkend="DTV-ISDBT-SOUND-BROADCASTING"><constant>DTV_ISDBT_SOUND_BROADCASTING</constant></link></para></listitem>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index 6f1f9a629dc3..b17a7aac6997 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -183,7 +183,12 @@ applications must set the array to zero.</entry>
 <entry>__u32</entry>
 <entry><structfield>ctrl_class</structfield></entry>
 <entry>The control class to which all controls belong, see
-<xref linkend="ctrl-class" />.</entry>
+<xref linkend="ctrl-class" />. Drivers that use a kernel framework for handling
+controls will also accept a value of 0 here, meaning that the controls can
+belong to any control class. Whether drivers support this can be tested by setting
+<structfield>ctrl_class</structfield> to 0 and calling <constant>VIDIOC_TRY_EXT_CTRLS</constant>
+with a <structfield>count</structfield> of 0. If that succeeds, then the driver
+supports this feature.</entry>
 </row>
 <row>
 <entry>__u32</entry>
@@ -194,10 +199,13 @@ also be zero.</entry>
 <row>
 <entry>__u32</entry>
 <entry><structfield>error_idx</structfield></entry>
-<entry>Set by the driver in case of an error. It is the
-index of the control causing the error or equal to 'count' when the
-error is not associated with a particular control. Undefined when the
-ioctl returns 0 (success).</entry>
+<entry>Set by the driver in case of an error. If it is equal
+to <structfield>count</structfield>, then no actual changes were made to
+controls. In other words, the error was not associated with setting a particular
+control. If it is another value, then only the controls up to <structfield>error_idx-1</structfield>
+were modified and control <structfield>error_idx</structfield> is the one that
+caused the error. The <structfield>error_idx</structfield> value is undefined
+if the ioctl returned 0 (success).</entry>
 </row>
 <row>
 <entry>__u32</entry>
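A minimal user-space sketch of the ctrl_class probe described in this hunk; the device path is a hypothetical example and the snippet is illustrative only, not part of the patch:

    /*
     * Probe whether the driver accepts ctrl_class == 0, i.e. whether it
     * uses the kernel control framework, as documented above.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_ext_controls ctrls;
        int fd = open("/dev/video0", O_RDWR);   /* hypothetical node */

        if (fd < 0)
            return 1;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.ctrl_class = 0;   /* "any class" */
        ctrls.count = 0;        /* no controls: just test for support */

        if (ioctl(fd, VIDIOC_TRY_EXT_CTRLS, &ctrls) == 0)
            printf("ctrl_class == 0 is supported\n");
        else
            printf("a specific control class is required\n");
        return 0;
    }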
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml b/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
index 93817f337033..7c63815e7afd 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
@@ -364,15 +364,20 @@ capability and it is cleared otherwise.</entry>
 <row>
 <entry><constant>V4L2_FBUF_FLAG_OVERLAY</constant></entry>
 <entry>0x0002</entry>
-<entry>The frame buffer is an overlay surface the same
-size as the capture. [?]</entry>
-</row>
-<row>
-<entry spanname="hspan">The purpose of
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> was never quite clear.
-Most drivers seem to ignore this flag. For compatibility with the
-<wordasword>bttv</wordasword> driver applications should set the
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> flag.</entry>
+<entry>If this flag is set for a video capture device, then the
+driver will set the initial overlay size to cover the full framebuffer size,
+otherwise the existing overlay size (as set by &VIDIOC-S-FMT;) will be used.
+
+Only one video capture driver (bttv) supports this flag. The use of this flag
+for capture devices is deprecated. There is no way to detect which drivers
+support this flag, so the only reliable method of setting the overlay size is
+through &VIDIOC-S-FMT;.
+
+If this flag is set for a video output device, then the video output overlay
+window is relative to the top-left corner of the framebuffer and restricted
+to the size of the framebuffer. If it is cleared, then the video output
+overlay window is relative to the video output display.
+</entry>
 </row>
 <row>
 <entry><constant>V4L2_FBUF_FLAG_CHROMAKEY</constant></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
index 16431813bebd..66e9a5257861 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
@@ -98,8 +98,11 @@ the &v4l2-output; <structfield>modulator</structfield> field and the
 <entry>&v4l2-tuner-type;</entry>
 <entry><structfield>type</structfield></entry>
 <entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. The field is not
-applicable to modulators, &ie; ignored by drivers.</entry>
+&v4l2-tuner; <structfield>type</structfield> field. The type must be set
+to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
+device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
+for all others. The field is not applicable to modulators, &ie; ignored
+by drivers.</entry>
 </row>
 <row>
 <entry>__u32</entry>
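A minimal user-space sketch of the tuner type rule described in this hunk; the radio node path is a hypothetical example and the snippet is illustrative only, not part of the patch:

    /*
     * For a /dev/radioX node the type field must be V4L2_TUNER_RADIO;
     * a TV video node would use V4L2_TUNER_ANALOG_TV instead.
     */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_frequency f;
        int fd = open("/dev/radio0", O_RDWR);   /* hypothetical node */

        if (fd < 0)
            return 1;

        memset(&f, 0, sizeof(f));
        f.tuner = 0;
        f.type = V4L2_TUNER_RADIO;      /* matches the radio device node */

        if (ioctl(fd, VIDIOC_G_FREQUENCY, &f) == 0) {
            /* re-program the same frequency; units depend on tuner capabilities */
            ioctl(fd, VIDIOC_S_FREQUENCY, &f);
        }
        return 0;
    }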
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-input.xml b/Documentation/DocBook/media/v4l/vidioc-g-input.xml
index 08ae82f131f2..1d43065090dd 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-input.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-input.xml
@@ -61,8 +61,8 @@ desired input in an integer and call the
 <constant>VIDIOC_S_INPUT</constant> ioctl with a pointer to this
 integer. Side effects are possible. For example inputs may support
 different video standards, so the driver may implicitly switch the
-current standard. It is good practice to select an input before
-querying or negotiating any other parameters.</para>
+current standard. Because of these possible side effects applications
+must select an input before querying or negotiating any other parameters.</para>
 
 <para>Information about video inputs is available using the
 &VIDIOC-ENUMINPUT; ioctl.</para>
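A minimal user-space sketch of the select-input-first rule from this hunk; input 0 and the device path are hypothetical examples, not part of the patch:

    /*
     * Select an input before any other negotiation, since VIDIOC_S_INPUT
     * may implicitly switch the current video standard.
     */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* hypothetical node */
        int index = 0;                          /* hypothetical input */

        if (fd < 0)
            return 1;

        if (ioctl(fd, VIDIOC_S_INPUT, &index) < 0)
            return 1;

        /* only now query or negotiate standards, formats, controls, ... */
        return 0;
    }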
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-output.xml b/Documentation/DocBook/media/v4l/vidioc-g-output.xml
index fd45f1c13ccf..4533068ecb8a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-output.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-output.xml
@@ -61,8 +61,9 @@ desired output in an integer and call the
 <constant>VIDIOC_S_OUTPUT</constant> ioctl with a pointer to this integer.
 Side effects are possible. For example outputs may support different
 video standards, so the driver may implicitly switch the current
-standard. It is good practice to select an output before querying or
-negotiating any other parameters.</para>
+standard.
+standard. Because of these possible side effects applications
+must select an output before querying or negotiating any other parameters.</para>
 
 <para>Information about video outputs is available using the
 &VIDIOC-ENUMOUTPUT; ioctl.</para>
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
index 5cc699ba5453..e7cc36397217 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/acpi/apei/einj.txt
@@ -47,20 +47,53 @@ directory apei/einj. The following files are provided.
 
 - param1
   This file is used to set the first error parameter value. Effect of
-  parameter depends on error_type specified. For memory error, this is
-  physical memory address. Only available if param_extension module
-  parameter is specified.
+  parameter depends on error_type specified.
 
 - param2
   This file is used to set the second error parameter value. Effect of
-  parameter depends on error_type specified. For memory error, this is
-  physical memory address mask. Only available if param_extension
-  module parameter is specified.
+  parameter depends on error_type specified.
+
+BIOS versions based in the ACPI 4.0 specification have limited options
+to control where the errors are injected. Your BIOS may support an
+extension (enabled with the param_extension=1 module parameter, or
+boot command line einj.param_extension=1). This allows the address
+and mask for memory injections to be specified by the param1 and
+param2 files in apei/einj.
+
+BIOS versions using the ACPI 5.0 specification have more control over
+the target of the injection. For processor related errors (type 0x1,
+0x2 and 0x4) the APICID of the target should be provided using the
+param1 file in apei/einj. For memory errors (type 0x8, 0x10 and 0x20)
+the address is set using param1 with a mask in param2 (0x0 is equivalent
+to all ones). For PCI express errors (type 0x40, 0x80 and 0x100) the
+segment, bus, device and function are specified using param1:
+
+        31     24 23    16 15    11 10      8  7        0
+       +-------------------------------------------------+
+       | segment |   bus  | device | function | reserved |
+       +-------------------------------------------------+
+
+An ACPI 5.0 BIOS may also allow vendor specific errors to be injected.
+In this case a file named vendor will contain identifying information
+from the BIOS that hopefully will allow an application wishing to use
+the vendor specific extension to tell that they are running on a BIOS
+that supports it. All vendor extensions have the 0x80000000 bit set in
+error_type. A file vendor_flags controls the interpretation of param1
+and param2 (1 = PROCESSOR, 2 = MEMORY, 4 = PCI). See your BIOS vendor
+documentation for details (and expect changes to this API if vendors
+creativity in using this feature expands beyond our expectations).
+
+Example:
+# cd /sys/kernel/debug/apei/einj
+# cat available_error_type          # See which errors can be injected
+0x00000002  Processor Uncorrectable non-fatal
+0x00000008  Memory Correctable
+0x00000010  Memory Uncorrectable non-fatal
+# echo 0x12345000 > param1          # Set memory address for injection
+# echo 0xfffffffffffff000 > param2  # Mask - anywhere in this page
+# echo 0x8 > error_type             # Choose correctable memory error
+# echo 1 > error_inject             # Inject now
 
-Injecting parameter support is a BIOS version specific extension, that
-is, it only works on some BIOS version. If you want to use it, please
-make sure your BIOS version has the proper support and specify
-"param_extension=y" in module parameter.
 
 For more information about EINJ, please refer to ACPI specification
-version 4.0, section 17.5.
+version 4.0, section 17.5 and ACPI 5.0, section 18.6.
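A small sketch of how the param1 bit layout documented above could be packed for a PCIe target; the segment/bus/device/function values are invented for illustration and the snippet is not part of the patch:

    /* Pack an ACPI 5.0 EINJ PCIe target into param1 per the layout above. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t einj_pcie_param1(uint32_t seg, uint32_t bus,
                                     uint32_t dev, uint32_t fn)
    {
        /* bits 31-24 segment, 23-16 bus, 15-11 device, 10-8 function */
        return (seg << 24) | (bus << 16) | (dev << 11) | (fn << 8);
    }

    int main(void)
    {
        /* e.g. the (made-up) device at 0000:3a:00.1 */
        printf("echo 0x%08x > param1\n",
               (unsigned int)einj_pcie_param1(0, 0x3a, 0, 1));
        return 0;
    }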
diff --git a/Documentation/devicetree/bindings/i2c/omap-i2c.txt b/Documentation/devicetree/bindings/i2c/omap-i2c.txt
new file mode 100644
index 000000000000..56564aa4b444
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/omap-i2c.txt
@@ -0,0 +1,30 @@
+I2C for OMAP platforms
+
+Required properties :
+- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c"
+- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Recommended properties :
+- clock-frequency : Desired I2C bus clock frequency in Hz. Otherwise
+  the default 100 kHz frequency will be used.
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Note: Current implementation will fetch base address, irq and dma
+from omap hwmod data base during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Examples :
+
+i2c1: i2c@0 {
+    compatible = "ti,omap3-i2c";
+    #address-cells = <1>;
+    #size-cells = <0>;
+    ti,hwmods = "i2c1";
+    clock-frequency = <400000>;
+};
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d725c0dfe032..1bea46a54b1c 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -439,17 +439,6 @@ Who: Jean Delvare <khali@linux-fr.org>
 
 ----------------------------
 
-What: For VIDIOC_S_FREQUENCY the type field must match the device node's type.
-      If not, return -EINVAL.
-When: 3.2
-Why:  It makes no sense to switch the tuner to radio mode by calling
-      VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
-      calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
-      move to more consistent handling of tv and radio tuners.
-Who:  Hans Verkuil <hans.verkuil@cisco.com>
-
-----------------------------
-
 What: Opening a radio device node will no longer automatically switch the
       tuner mode from tv to radio.
 When: 3.3
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 54078ed96b37..4840334ea97b 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -149,6 +149,7 @@ Code Seq#(hex) Include File Comments
 'M'  01-03  drivers/scsi/megaraid/megaraid_sas.h
 'M'  00-0F  drivers/video/fsl-diu-fb.h  conflict!
 'N'  00-1F  drivers/usb/scanner.h
+'N'  40-7F  drivers/block/nvme.c
 'O'  00-06  mtd/ubi-user.h  UBI
 'P'  all    linux/soundcard.h  conflict!
 'P'  60-6F  sound/sscape_ioctl.h  conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b29f3c416296..033d4e69b43b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1059,6 +1059,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
         By default, super page will be supported if Intel IOMMU
         has the capability. With this option, super page will
         not be supported.
+
+    intel_idle.max_cstate=  [KNL,HW,ACPI,X86]
+        0 disables intel_idle and fall back on acpi_idle.
+        1 to 6 specify maximum depth of C-state.
+
     intremap=  [X86-64, Intel-IOMMU]
         on  enable Interrupt Remapping (default)
         off disable Interrupt Remapping
diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt
index 40a4c65f380a..262acf56fa79 100644
--- a/Documentation/power/basic-pm-debugging.txt
+++ b/Documentation/power/basic-pm-debugging.txt
@@ -15,7 +15,7 @@ test at least a couple of times in a row for confidence. [This is necessary,
 because some problems only show up on a second attempt at suspending and
 resuming the system.] Moreover, hibernating in the "reboot" and "shutdown"
 modes causes the PM core to skip some platform-related callbacks which on ACPI
-systems might be necessary to make hibernation work. Thus, if you machine fails
+systems might be necessary to make hibernation work. Thus, if your machine fails
 to hibernate or resume in the "reboot" mode, you should try the "platform" mode:
 
 # echo platform > /sys/power/disk
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 6ccb68f68da6..ebd7490ef1df 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -120,10 +120,10 @@ So in practice, the 'at all' may become a 'why freeze kernel threads?' and
 freezing user threads I don't find really objectionable."
 
 Still, there are kernel threads that may want to be freezable. For example, if
-a kernel that belongs to a device driver accesses the device directly, it in
-principle needs to know when the device is suspended, so that it doesn't try to
-access it at that time. However, if the kernel thread is freezable, it will be
-frozen before the driver's .suspend() callback is executed and it will be
+a kernel thread that belongs to a device driver accesses the device directly, it
+in principle needs to know when the device is suspended, so that it doesn't try
+to access it at that time. However, if the kernel thread is freezable, it will
+be frozen before the driver's .suspend() callback is executed and it will be
 thawed after the driver's .resume() callback has run, so it won't be accessing
 the device while it's suspended.
 
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 64adb98b181c..57566bacb4c5 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date : Fri. Jan 6, 2012 17:00:00 PST 2010 -
+              (emaild-id:megaraidlinux@lsi.com)
+              Adam Radford
+Current Version : 00.00.06.14-rc1
+Old Version : 00.00.06.12-rc1
+    1. Fix reglockFlags for degraded raid5/6 for MR 9360/9380.
+    2. Mask off flags in ioctl path to prevent memory scribble with older
+       MegaCLI versions.
+    3. Remove poll_mode_io module paramater, sysfs node, and associated code.
+-------------------------------------------------------------------------------
 Release Date : Wed. Oct 5, 2011 17:00:00 PST 2010 -
               (emaild-id:megaraidlinux@lsi.com)
               Adam Radford
diff --git a/Documentation/scsi/LICENSE.qla4xxx b/Documentation/scsi/LICENSE.qla4xxx
index 494980e40491..ab899591ecb7 100644
--- a/Documentation/scsi/LICENSE.qla4xxx
+++ b/Documentation/scsi/LICENSE.qla4xxx
@@ -1,32 +1,11 @@
 Copyright (c) 2003-2011 QLogic Corporation
-QLogic Linux iSCSI HBA Driver
+QLogic Linux iSCSI Driver
 
 This program includes a device driver for Linux 3.x.
 You may modify and redistribute the device driver code under the
 GNU General Public License (a copy of which is attached hereto as
 Exhibit A) published by the Free Software Foundation (version 2).
 
-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
-THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
-CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
-OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
-TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
-ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
-COMBINATION WITH THIS PROGRAM.
-
 
 EXHIBIT A
 
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 7ef9b843d529..6e21b8b52638 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -230,14 +230,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 buf += "#include <linux/ctype.h>\n"
 buf += "#include <asm/unaligned.h>\n\n"
 buf += "#include <target/target_core_base.h>\n"
-buf += "#include <target/target_core_transport.h>\n"
-buf += "#include <target/target_core_fabric_ops.h>\n"
+buf += "#include <target/target_core_fabric.h>\n"
 buf += "#include <target/target_core_fabric_configfs.h>\n"
-buf += "#include <target/target_core_fabric_lib.h>\n"
-buf += "#include <target/target_core_device.h>\n"
-buf += "#include <target/target_core_tpg.h>\n"
 buf += "#include <target/target_core_configfs.h>\n"
-buf += "#include <target/target_core_base.h>\n"
 buf += "#include <target/configfs_macros.h>\n\n"
 buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
 buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -260,7 +255,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
 buf += " return ERR_PTR(-EINVAL); */\n"
 buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
-buf += " if (!(se_nacl_new))\n"
+buf += " if (!se_nacl_new)\n"
 buf += " return ERR_PTR(-ENOMEM);\n"
 buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
 buf += " nexus_depth = 1;\n"
@@ -308,7 +303,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
 buf += " return ERR_PTR(-EINVAL);\n\n"
 buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
-buf += " if (!(tpg)) {\n"
+buf += " if (!tpg) {\n"
 buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
 buf += " return ERR_PTR(-ENOMEM);\n"
 buf += " }\n"
@@ -344,7 +339,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
 buf += " return ERR_PTR(-EINVAL); */\n\n"
 buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
-buf += " if (!(" + fabric_mod_port + ")) {\n"
+buf += " if (!" + fabric_mod_port + ") {\n"
 buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
 buf += " return ERR_PTR(-ENOMEM);\n"
 buf += " }\n"
@@ -352,7 +347,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 if proto_ident == "FC" or proto_ident == "SAS":
 buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
 
-buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
+buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
 buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
 buf += "}\n\n"
 buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
@@ -391,8 +386,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
 buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
 buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
-buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
-buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
+buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
 buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
 buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
 buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
@@ -405,14 +399,12 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
405 buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" 399 buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
406 buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n" 400 buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
407 buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" 401 buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
408 buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
409 buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" 402 buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
410 buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" 403 buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
411 buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n" 404 buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
412 buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n" 405 buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
413 buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n" 406 buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
414 buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n" 407 buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
415 buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
416 buf += " /*\n" 408 buf += " /*\n"
417 buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n" 409 buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
418 buf += " */\n" 410 buf += " */\n"
@@ -439,9 +431,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
439 buf += " * Register the top level struct config_item_type with TCM core\n" 431 buf += " * Register the top level struct config_item_type with TCM core\n"
440 buf += " */\n" 432 buf += " */\n"
441 buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n" 433 buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
442 buf += " if (!(fabric)) {\n" 434 buf += " if (IS_ERR(fabric)) {\n"
443 buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" 435 buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
444 buf += " return -ENOMEM;\n" 436 buf += " return PTR_ERR(fabric);\n"
445 buf += " }\n" 437 buf += " }\n"
446 buf += " /*\n" 438 buf += " /*\n"
447 buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n" 439 buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
@@ -475,9 +467,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
475 buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n" 467 buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
476 buf += " return 0;\n" 468 buf += " return 0;\n"
477 buf += "};\n\n" 469 buf += "};\n\n"
478 buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n" 470 buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
479 buf += "{\n" 471 buf += "{\n"
480 buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n" 472 buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
481 buf += " return;\n\n" 473 buf += " return;\n\n"
482 buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n" 474 buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
483 buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n" 475 buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
@@ -492,17 +484,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
492 buf += " return ret;\n\n" 484 buf += " return ret;\n\n"
493 buf += " return 0;\n" 485 buf += " return 0;\n"
494 buf += "};\n\n" 486 buf += "};\n\n"
495 buf += "static void " + fabric_mod_name + "_exit(void)\n" 487 buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
496 buf += "{\n" 488 buf += "{\n"
497 buf += " " + fabric_mod_name + "_deregister_configfs();\n" 489 buf += " " + fabric_mod_name + "_deregister_configfs();\n"
498 buf += "};\n\n" 490 buf += "};\n\n"
499 491
500 buf += "#ifdef MODULE\n"
501 buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" 492 buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
502 buf += "MODULE_LICENSE(\"GPL\");\n" 493 buf += "MODULE_LICENSE(\"GPL\");\n"
503 buf += "module_init(" + fabric_mod_name + "_init);\n" 494 buf += "module_init(" + fabric_mod_name + "_init);\n"
504 buf += "module_exit(" + fabric_mod_name + "_exit);\n" 495 buf += "module_exit(" + fabric_mod_name + "_exit);\n"
505 buf += "#endif\n"
506 496
507 ret = p.write(buf) 497 ret = p.write(buf)
508 if ret: 498 if ret:
@@ -514,7 +504,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
514 504
515def tcm_mod_scan_fabric_ops(tcm_dir): 505def tcm_mod_scan_fabric_ops(tcm_dir):
516 506
517 fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h" 507 fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
518 508
519 print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api 509 print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
520 process_fo = 0; 510 process_fo = 0;
@@ -579,11 +569,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
579 buf += "#include <scsi/scsi_cmnd.h>\n" 569 buf += "#include <scsi/scsi_cmnd.h>\n"
580 buf += "#include <scsi/libfc.h>\n\n" 570 buf += "#include <scsi/libfc.h>\n\n"
581 buf += "#include <target/target_core_base.h>\n" 571 buf += "#include <target/target_core_base.h>\n"
582 buf += "#include <target/target_core_transport.h>\n" 572 buf += "#include <target/target_core_fabric.h>\n"
583 buf += "#include <target/target_core_fabric_ops.h>\n"
584 buf += "#include <target/target_core_fabric_lib.h>\n"
585 buf += "#include <target/target_core_device.h>\n"
586 buf += "#include <target/target_core_tpg.h>\n"
587 buf += "#include <target/target_core_configfs.h>\n\n" 573 buf += "#include <target/target_core_configfs.h>\n\n"
588 buf += "#include \"" + fabric_mod_name + "_base.h\"\n" 574 buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
589 buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" 575 buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -788,7 +774,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
788 buf += "{\n" 774 buf += "{\n"
789 buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n" 775 buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
790 buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n" 776 buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
791 buf += " if (!(nacl)) {\n" 777 buf += " if (!nacl) {\n"
792 buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n" 778 buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
793 buf += " return NULL;\n" 779 buf += " return NULL;\n"
794 buf += " }\n\n" 780 buf += " }\n\n"
@@ -815,7 +801,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
815 buf += "}\n\n" 801 buf += "}\n\n"
816 bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n" 802 bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
817 803
818 if re.search('release_cmd_to_pool', fo): 804 if re.search('\*release_cmd\)\(', fo):
819 buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n" 805 buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
820 buf += "{\n" 806 buf += "{\n"
821 buf += " return;\n" 807 buf += " return;\n"
@@ -899,13 +885,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
899 buf += "}\n\n" 885 buf += "}\n\n"
900 bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n" 886 bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
901 887
902 if re.search('new_cmd_failure\)\(', fo):
903 buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
904 buf += "{\n"
905 buf += " return;\n"
906 buf += "}\n\n"
907 bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
908
909 if re.search('queue_data_in\)\(', fo): 888 if re.search('queue_data_in\)\(', fo):
910 buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n" 889 buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
911 buf += "{\n" 890 buf += "{\n"
@@ -948,15 +927,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
948 buf += "}\n\n" 927 buf += "}\n\n"
949 bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n" 928 bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
950 929
951 if re.search('pack_lun\)\(', fo):
952 buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
953 buf += "{\n"
954 buf += " WARN_ON(lun >= 256);\n"
955 buf += " /* Caller wants this byte-swapped */\n"
956 buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
957 buf += "}\n\n"
958 bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
959
960 930
961 ret = p.write(buf) 931 ret = p.write(buf)
962 if ret: 932 if ret:
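
The configfs registration code emitted by the script above now follows the kernel's ERR_PTR convention: a failure from target_fabric_configfs_init() is propagated with IS_ERR()/PTR_ERR() instead of being treated as a NULL result worth a blanket -ENOMEM. A minimal sketch of that idiom, using a hypothetical foo_alloc()/foo_register() pair rather than the generated fabric code:

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int dummy;
};

static struct foo *foo_alloc(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return p;
}

static int foo_register(void)
{
	struct foo *p = foo_alloc();

	if (IS_ERR(p))			/* matches ERR_PTR() values, not NULL */
		return PTR_ERR(p);	/* recover the negative errno */
	/* ... use p; kfree(p) on the teardown path ... */
	return 0;
}
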
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index 26aa0573933e..e2492a9d1027 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -666,27 +666,6 @@ a control of this type whenever the first control belonging to a new control
666class is added. 666class is added.
667 667
668 668
669Differences from the Spec
670=========================
671
672There are a few places where the framework acts slightly differently from the
673V4L2 Specification. Those differences are described in this section. We will
674have to see whether we need to adjust the spec or not.
675
6761) It is no longer required to have all controls contained in a
677v4l2_ext_control array be from the same control class. The framework will be
678able to handle any type of control in the array. You need to set ctrl_class
679to 0 in order to enable this. If ctrl_class is non-zero, then it will still
680check that all controls belong to that control class.
681
682If you set ctrl_class to 0 and count to 0, then it will only return an error
683if there are no controls at all.
684
6852) Clarified the way error_idx works. For get and set it will be equal to
686count if nothing was done yet. If it is less than count then only the controls
687up to error_idx-1 were successfully applied.
688
689
690Proposals for Extensions 669Proposals for Extensions
691======================== 670========================
692 671
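
The section removed above described two framework behaviours that have since been folded into the V4L2 specification proper: a zero ctrl_class lets a single VIDIOC_G/S_EXT_CTRLS call mix controls from different control classes, and error_idx reports how far a partially failed call got. A hedged user-space sketch of the first point, assuming an already opened device node fd and two arbitrary control IDs chosen purely for illustration:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Read two controls from different classes in one call by leaving
 * ctrl_class at 0. On failure, ctrls.error_idx indicates which control
 * (or, if equal to count, that nothing was attempted yet). */
static int read_two_controls(int fd, __s32 *brightness, __s32 *exposure)
{
	struct v4l2_ext_control ctrl[2];
	struct v4l2_ext_controls ctrls;

	memset(ctrl, 0, sizeof(ctrl));
	ctrl[0].id = V4L2_CID_BRIGHTNESS;		/* user class */
	ctrl[1].id = V4L2_CID_EXPOSURE_ABSOLUTE;	/* camera class */

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.ctrl_class = 0;		/* 0: controls may span classes */
	ctrls.count = 2;
	ctrls.controls = ctrl;

	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0)
		return -1;

	*brightness = ctrl[0].value;
	*exposure = ctrl[1].value;
	return 0;
}
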
diff --git a/MAINTAINERS b/MAINTAINERS
index fa3f5e6a608b..93c68d5f1cf4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2246,6 +2246,17 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm.git
2246S: Supported 2246S: Supported
2247F: fs/dlm/ 2247F: fs/dlm/
2248 2248
2249DMA BUFFER SHARING FRAMEWORK
2250M: Sumit Semwal <sumit.semwal@linaro.org>
2251S: Maintained
2252L: linux-media@vger.kernel.org
2253L: dri-devel@lists.freedesktop.org
2254L: linaro-mm-sig@lists.linaro.org
2255F: drivers/base/dma-buf*
2256F: include/linux/dma-buf*
2257F: Documentation/dma-buf-sharing.txt
2258T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
2259
2249DMA GENERIC OFFLOAD ENGINE SUBSYSTEM 2260DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
2250M: Vinod Koul <vinod.koul@intel.com> 2261M: Vinod Koul <vinod.koul@intel.com>
2251M: Dan Williams <dan.j.williams@intel.com> 2262M: Dan Williams <dan.j.williams@intel.com>
@@ -7200,7 +7211,7 @@ S: Maintained
7200F: drivers/net/vmxnet3/ 7211F: drivers/net/vmxnet3/
7201 7212
7202VMware PVSCSI driver 7213VMware PVSCSI driver
7203M: Alok Kataria <akataria@vmware.com> 7214M: Arvind Kumar <arvindkumar@vmware.com>
7204M: VMware PV-Drivers <pv-drivers@vmware.com> 7215M: VMware PV-Drivers <pv-drivers@vmware.com>
7205L: linux-scsi@vger.kernel.org 7216L: linux-scsi@vger.kernel.org
7206S: Maintained 7217S: Maintained
diff --git a/Makefile b/Makefile
index 156ac69c961e..71e6ed21dd15 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 2 2PATCHLEVEL = 3
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc1
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 40319d91bb7f..1683bfb9166f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -160,7 +160,6 @@ machine-$(CONFIG_ARCH_MSM) := msm
160machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0 160machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
161machine-$(CONFIG_ARCH_IMX_V4_V5) := imx 161machine-$(CONFIG_ARCH_IMX_V4_V5) := imx
162machine-$(CONFIG_ARCH_IMX_V6_V7) := imx 162machine-$(CONFIG_ARCH_IMX_V6_V7) := imx
163machine-$(CONFIG_ARCH_MX5) := mx5
164machine-$(CONFIG_ARCH_MXS) := mxs 163machine-$(CONFIG_ARCH_MXS) := mxs
165machine-$(CONFIG_ARCH_NETX) := netx 164machine-$(CONFIG_ARCH_NETX) := netx
166machine-$(CONFIG_ARCH_NOMADIK) := nomadik 165machine-$(CONFIG_ARCH_NOMADIK) := nomadik
diff --git a/arch/arm/configs/mx5_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index d0d8dfece37e..3a4fb2e5fc68 100644
--- a/arch/arm/configs/mx5_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -3,6 +3,7 @@ CONFIG_EXPERIMENTAL=y
3CONFIG_KERNEL_LZO=y 3CONFIG_KERNEL_LZO=y
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
5CONFIG_LOG_BUF_SHIFT=18 5CONFIG_LOG_BUF_SHIFT=18
6CONFIG_CGROUPS=y
6CONFIG_RELAY=y 7CONFIG_RELAY=y
7CONFIG_EXPERT=y 8CONFIG_EXPERT=y
8# CONFIG_SLUB_DEBUG is not set 9# CONFIG_SLUB_DEBUG is not set
@@ -14,20 +15,31 @@ CONFIG_MODULE_SRCVERSION_ALL=y
14# CONFIG_LBDAF is not set 15# CONFIG_LBDAF is not set
15# CONFIG_BLK_DEV_BSG is not set 16# CONFIG_BLK_DEV_BSG is not set
16CONFIG_ARCH_MXC=y 17CONFIG_ARCH_MXC=y
17CONFIG_ARCH_MX5=y 18CONFIG_MACH_MX31LILLY=y
18CONFIG_MACH_MX51_BABBAGE=y 19CONFIG_MACH_MX31LITE=y
20CONFIG_MACH_PCM037=y
21CONFIG_MACH_PCM037_EET=y
22CONFIG_MACH_MX31_3DS=y
23CONFIG_MACH_MX31MOBOARD=y
24CONFIG_MACH_QONG=y
25CONFIG_MACH_ARMADILLO5X0=y
26CONFIG_MACH_KZM_ARM11_01=y
27CONFIG_MACH_PCM043=y
28CONFIG_MACH_MX35_3DS=y
29CONFIG_MACH_EUKREA_CPUIMX35=y
30CONFIG_MACH_VPR200=y
31CONFIG_MACH_IMX51_DT=y
19CONFIG_MACH_MX51_3DS=y 32CONFIG_MACH_MX51_3DS=y
20CONFIG_MACH_EUKREA_CPUIMX51=y 33CONFIG_MACH_EUKREA_CPUIMX51=y
21CONFIG_MACH_EUKREA_CPUIMX51SD=y 34CONFIG_MACH_EUKREA_CPUIMX51SD=y
22CONFIG_MACH_MX51_EFIKAMX=y 35CONFIG_MACH_MX51_EFIKAMX=y
23CONFIG_MACH_MX51_EFIKASB=y 36CONFIG_MACH_MX51_EFIKASB=y
24CONFIG_MACH_MX53_EVK=y 37CONFIG_MACH_IMX53_DT=y
25CONFIG_MACH_MX53_SMD=y 38CONFIG_SOC_IMX6Q=y
26CONFIG_MACH_MX53_LOCO=y
27CONFIG_MACH_MX53_ARD=y
28CONFIG_MXC_PWM=y 39CONFIG_MXC_PWM=y
29CONFIG_NO_HZ=y 40CONFIG_NO_HZ=y
30CONFIG_HIGH_RES_TIMERS=y 41CONFIG_HIGH_RES_TIMERS=y
42CONFIG_SMP=y
31CONFIG_VMSPLIT_2G=y 43CONFIG_VMSPLIT_2G=y
32CONFIG_PREEMPT_VOLUNTARY=y 44CONFIG_PREEMPT_VOLUNTARY=y
33CONFIG_AEABI=y 45CONFIG_AEABI=y
@@ -49,7 +61,7 @@ CONFIG_IP_PNP_DHCP=y
49# CONFIG_INET_XFRM_MODE_TUNNEL is not set 61# CONFIG_INET_XFRM_MODE_TUNNEL is not set
50# CONFIG_INET_XFRM_MODE_BEET is not set 62# CONFIG_INET_XFRM_MODE_BEET is not set
51# CONFIG_INET_LRO is not set 63# CONFIG_INET_LRO is not set
52# CONFIG_IPV6 is not set 64CONFIG_IPV6=y
53# CONFIG_WIRELESS is not set 65# CONFIG_WIRELESS is not set
54CONFIG_DEVTMPFS=y 66CONFIG_DEVTMPFS=y
55CONFIG_DEVTMPFS_MOUNT=y 67CONFIG_DEVTMPFS_MOUNT=y
@@ -68,24 +80,20 @@ CONFIG_SCSI_SCAN_ASYNC=y
68CONFIG_ATA=y 80CONFIG_ATA=y
69CONFIG_PATA_IMX=y 81CONFIG_PATA_IMX=y
70CONFIG_NETDEVICES=y 82CONFIG_NETDEVICES=y
71CONFIG_MII=m 83# CONFIG_NET_VENDOR_BROADCOM is not set
72CONFIG_MARVELL_PHY=y 84# CONFIG_NET_VENDOR_CHELSIO is not set
73CONFIG_DAVICOM_PHY=y 85# CONFIG_NET_VENDOR_FARADAY is not set
74CONFIG_QSEMI_PHY=y 86CONFIG_FEC=y
75CONFIG_LXT_PHY=y 87# CONFIG_NET_VENDOR_INTEL is not set
76CONFIG_CICADA_PHY=y 88# CONFIG_NET_VENDOR_MARVELL is not set
77CONFIG_VITESSE_PHY=y 89# CONFIG_NET_VENDOR_MICREL is not set
78CONFIG_SMSC_PHY=y 90# CONFIG_NET_VENDOR_MICROCHIP is not set
79CONFIG_BROADCOM_PHY=y 91# CONFIG_NET_VENDOR_NATSEMI is not set
80CONFIG_ICPLUS_PHY=y 92# CONFIG_NET_VENDOR_SEEQ is not set
81CONFIG_REALTEK_PHY=y 93CONFIG_SMC91X=y
82CONFIG_NATIONAL_PHY=y 94CONFIG_SMC911X=y
83CONFIG_STE10XP=y 95CONFIG_SMSC911X=y
84CONFIG_LSI_ET1011C_PHY=y 96# CONFIG_NET_VENDOR_STMICRO is not set
85CONFIG_MICREL_PHY=y
86CONFIG_NET_ETHERNET=y
87# CONFIG_NETDEV_1000 is not set
88# CONFIG_NETDEV_10000 is not set
89# CONFIG_WLAN is not set 97# CONFIG_WLAN is not set
90# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 98# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
91CONFIG_INPUT_EVDEV=y 99CONFIG_INPUT_EVDEV=y
@@ -124,7 +132,6 @@ CONFIG_USB_EHCI_HCD=y
124CONFIG_USB_EHCI_MXC=y 132CONFIG_USB_EHCI_MXC=y
125CONFIG_USB_STORAGE=y 133CONFIG_USB_STORAGE=y
126CONFIG_MMC=y 134CONFIG_MMC=y
127CONFIG_MMC_BLOCK=m
128CONFIG_MMC_SDHCI=y 135CONFIG_MMC_SDHCI=y
129CONFIG_MMC_SDHCI_PLTFM=y 136CONFIG_MMC_SDHCI_PLTFM=y
130CONFIG_MMC_SDHCI_ESDHC_IMX=y 137CONFIG_MMC_SDHCI_ESDHC_IMX=y
@@ -133,6 +140,8 @@ CONFIG_LEDS_CLASS=y
133CONFIG_RTC_CLASS=y 140CONFIG_RTC_CLASS=y
134CONFIG_RTC_INTF_DEV_UIE_EMUL=y 141CONFIG_RTC_INTF_DEV_UIE_EMUL=y
135CONFIG_RTC_MXC=y 142CONFIG_RTC_MXC=y
143CONFIG_DMADEVICES=y
144CONFIG_IMX_SDMA=y
136CONFIG_EXT2_FS=y 145CONFIG_EXT2_FS=y
137CONFIG_EXT2_FS_XATTR=y 146CONFIG_EXT2_FS_XATTR=y
138CONFIG_EXT2_FS_POSIX_ACL=y 147CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/mx3_defconfig b/arch/arm/configs/mx3_defconfig
deleted file mode 100644
index cb0717fbb03d..000000000000
--- a/arch/arm/configs/mx3_defconfig
+++ /dev/null
@@ -1,144 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y
3CONFIG_IKCONFIG=y
4CONFIG_IKCONFIG_PROC=y
5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_EXPERT=y
7CONFIG_SLAB=y
8CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y
10CONFIG_MODULE_FORCE_UNLOAD=y
11CONFIG_MODVERSIONS=y
12# CONFIG_BLK_DEV_BSG is not set
13CONFIG_ARCH_MXC=y
14CONFIG_MACH_MX31ADS_WM1133_EV1=y
15CONFIG_MACH_MX31LILLY=y
16CONFIG_MACH_MX31LITE=y
17CONFIG_MACH_PCM037=y
18CONFIG_MACH_PCM037_EET=y
19CONFIG_MACH_MX31_3DS=y
20CONFIG_MACH_MX31MOBOARD=y
21CONFIG_MACH_QONG=y
22CONFIG_MACH_ARMADILLO5X0=y
23CONFIG_MACH_KZM_ARM11_01=y
24CONFIG_MACH_PCM043=y
25CONFIG_MACH_MX35_3DS=y
26CONFIG_MACH_EUKREA_CPUIMX35=y
27CONFIG_MXC_IRQ_PRIOR=y
28CONFIG_MXC_PWM=y
29CONFIG_ARM_ERRATA_411920=y
30CONFIG_NO_HZ=y
31CONFIG_HIGH_RES_TIMERS=y
32CONFIG_PREEMPT=y
33CONFIG_AEABI=y
34CONFIG_ZBOOT_ROM_TEXT=0x0
35CONFIG_ZBOOT_ROM_BSS=0x0
36CONFIG_CMDLINE="noinitrd console=ttymxc0,115200 root=/dev/mtdblock2 rw ip=off"
37CONFIG_VFP=y
38CONFIG_PM_DEBUG=y
39CONFIG_NET=y
40CONFIG_PACKET=y
41CONFIG_UNIX=y
42CONFIG_INET=y
43CONFIG_IP_PNP=y
44CONFIG_IP_PNP_DHCP=y
45# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
46# CONFIG_INET_XFRM_MODE_TUNNEL is not set
47# CONFIG_INET_XFRM_MODE_BEET is not set
48# CONFIG_INET_LRO is not set
49# CONFIG_INET_DIAG is not set
50# CONFIG_IPV6 is not set
51CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
52CONFIG_FW_LOADER=m
53CONFIG_MTD=y
54CONFIG_MTD_CMDLINE_PARTS=y
55CONFIG_MTD_CHAR=y
56CONFIG_MTD_BLOCK=y
57CONFIG_MTD_CFI=y
58CONFIG_MTD_PHYSMAP=y
59CONFIG_MTD_NAND=y
60CONFIG_MTD_NAND_MXC=y
61CONFIG_MTD_UBI=y
62# CONFIG_BLK_DEV is not set
63CONFIG_MISC_DEVICES=y
64CONFIG_EEPROM_AT24=y
65CONFIG_NETDEVICES=y
66CONFIG_SMSC_PHY=y
67CONFIG_NET_ETHERNET=y
68CONFIG_SMSC911X=y
69CONFIG_DNET=y
70# CONFIG_NETDEV_1000 is not set
71# CONFIG_NETDEV_10000 is not set
72# CONFIG_INPUT_MOUSEDEV is not set
73# CONFIG_KEYBOARD_ATKBD is not set
74CONFIG_KEYBOARD_IMX=y
75# CONFIG_INPUT_MOUSE is not set
76# CONFIG_SERIO is not set
77# CONFIG_VT is not set
78# CONFIG_LEGACY_PTYS is not set
79CONFIG_SERIAL_8250=m
80CONFIG_SERIAL_8250_EXTENDED=y
81CONFIG_SERIAL_8250_SHARE_IRQ=y
82CONFIG_SERIAL_IMX=y
83CONFIG_SERIAL_IMX_CONSOLE=y
84# CONFIG_HW_RANDOM is not set
85CONFIG_I2C=y
86CONFIG_I2C_CHARDEV=y
87CONFIG_I2C_IMX=y
88CONFIG_SPI=y
89CONFIG_W1=y
90CONFIG_W1_MASTER_MXC=y
91CONFIG_W1_SLAVE_THERM=y
92# CONFIG_HWMON is not set
93CONFIG_WATCHDOG=y
94CONFIG_IMX2_WDT=y
95CONFIG_MFD_WM8350_I2C=y
96CONFIG_REGULATOR=y
97CONFIG_REGULATOR_WM8350=y
98CONFIG_MEDIA_SUPPORT=y
99CONFIG_VIDEO_DEV=y
100# CONFIG_RC_CORE is not set
101# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
102CONFIG_SOC_CAMERA=y
103CONFIG_SOC_CAMERA_MT9M001=y
104CONFIG_SOC_CAMERA_MT9M111=y
105CONFIG_SOC_CAMERA_MT9T031=y
106CONFIG_SOC_CAMERA_MT9V022=y
107CONFIG_SOC_CAMERA_TW9910=y
108CONFIG_SOC_CAMERA_OV772X=y
109CONFIG_VIDEO_MX3=y
110# CONFIG_RADIO_ADAPTERS is not set
111CONFIG_FB=y
112CONFIG_SOUND=y
113CONFIG_SND=y
114# CONFIG_SND_ARM is not set
115# CONFIG_SND_SPI is not set
116CONFIG_SND_SOC=y
117CONFIG_SND_IMX_SOC=y
118CONFIG_SND_MXC_SOC_WM1133_EV1=y
119CONFIG_SND_SOC_PHYCORE_AC97=y
120CONFIG_SND_SOC_EUKREA_TLV320=y
121CONFIG_USB=y
122CONFIG_USB_EHCI_HCD=y
123CONFIG_USB_EHCI_MXC=y
124CONFIG_USB_GADGET=m
125CONFIG_USB_FSL_USB2=m
126CONFIG_USB_G_SERIAL=m
127CONFIG_USB_ULPI=y
128CONFIG_MMC=y
129CONFIG_MMC_MXC=y
130CONFIG_RTC_CLASS=y
131CONFIG_RTC_MXC=y
132CONFIG_DMADEVICES=y
133# CONFIG_DNOTIFY is not set
134CONFIG_TMPFS=y
135CONFIG_JFFS2_FS=y
136CONFIG_UBIFS_FS=y
137CONFIG_NFS_FS=y
138CONFIG_NFS_V3=y
139CONFIG_NFS_V4=y
140CONFIG_ROOT_NFS=y
141# CONFIG_ENABLE_WARN_DEPRECATED is not set
142# CONFIG_ENABLE_MUST_CHECK is not set
143CONFIG_SYSCTL_SYSCALL_CHECK=y
144# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 0e6de366c648..09f357bcecde 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -22,6 +22,18 @@ config ARCH_MX25
22config MACH_MX27 22config MACH_MX27
23 bool 23 bool
24 24
25config ARCH_MX5
26 bool
27
28config ARCH_MX50
29 bool
30
31config ARCH_MX51
32 bool
33
34config ARCH_MX53
35 bool
36
25config SOC_IMX1 37config SOC_IMX1
26 bool 38 bool
27 select ARCH_MX1 39 select ARCH_MX1
@@ -73,6 +85,32 @@ config SOC_IMX35
73 select MXC_AVIC 85 select MXC_AVIC
74 select SMP_ON_UP if SMP 86 select SMP_ON_UP if SMP
75 87
88config SOC_IMX5
89 select CPU_V7
90 select ARM_L1_CACHE_SHIFT_6
91 select MXC_TZIC
92 select ARCH_MXC_IOMUX_V3
93 select ARCH_MXC_AUDMUX_V2
94 select ARCH_HAS_CPUFREQ
95 select ARCH_MX5
96 bool
97
98config SOC_IMX50
99 bool
100 select SOC_IMX5
101 select ARCH_MX50
102
103config SOC_IMX51
104 bool
105 select SOC_IMX5
106 select ARCH_MX5
107 select ARCH_MX51
108
109config SOC_IMX53
110 bool
111 select SOC_IMX5
112 select ARCH_MX5
113 select ARCH_MX53
76 114
77if ARCH_IMX_V4_V5 115if ARCH_IMX_V4_V5
78 116
@@ -592,6 +630,207 @@ config MACH_VPR200
592 Include support for VPR200 platform. This includes specific 630 Include support for VPR200 platform. This includes specific
593 configurations for the board and its peripherals. 631 configurations for the board and its peripherals.
594 632
633comment "i.MX5 platforms:"
634
635config MACH_MX50_RDP
636 bool "Support MX50 reference design platform"
637 depends on BROKEN
638 select SOC_IMX50
639 select IMX_HAVE_PLATFORM_IMX_I2C
640 select IMX_HAVE_PLATFORM_IMX_UART
641 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
642 select IMX_HAVE_PLATFORM_SPI_IMX
643 help
644 Include support for MX50 reference design platform (RDP) board. This
645 includes specific configurations for the board and its peripherals.
646
647comment "i.MX51 machines:"
648
649config MACH_IMX51_DT
650 bool "Support i.MX51 platforms from device tree"
651 select SOC_IMX51
652 select USE_OF
653 select MACH_MX51_BABBAGE
654 help
655 Include support for Freescale i.MX51 based platforms
656 using the device tree for discovery
657
658config MACH_MX51_BABBAGE
659 bool "Support MX51 BABBAGE platforms"
660 select SOC_IMX51
661 select IMX_HAVE_PLATFORM_FSL_USB2_UDC
662 select IMX_HAVE_PLATFORM_IMX2_WDT
663 select IMX_HAVE_PLATFORM_IMX_I2C
664 select IMX_HAVE_PLATFORM_IMX_UART
665 select IMX_HAVE_PLATFORM_MXC_EHCI
666 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
667 select IMX_HAVE_PLATFORM_SPI_IMX
668 help
669 Include support for MX51 Babbage platform, also known as MX51EVK in
670 u-boot. This includes specific configurations for the board and its
671 peripherals.
672
673config MACH_MX51_3DS
674 bool "Support MX51PDK (3DS)"
675 select SOC_IMX51
676 select IMX_HAVE_PLATFORM_IMX2_WDT
677 select IMX_HAVE_PLATFORM_IMX_KEYPAD
678 select IMX_HAVE_PLATFORM_IMX_UART
679 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
680 select IMX_HAVE_PLATFORM_SPI_IMX
681 select MXC_DEBUG_BOARD
682 help
683 Include support for MX51PDK (3DS) platform. This includes specific
684 configurations for the board and its peripherals.
685
686config MACH_EUKREA_CPUIMX51
687 bool "Support Eukrea CPUIMX51 module"
688 select SOC_IMX51
689 select IMX_HAVE_PLATFORM_FSL_USB2_UDC
690 select IMX_HAVE_PLATFORM_IMX_I2C
691 select IMX_HAVE_PLATFORM_IMX_UART
692 select IMX_HAVE_PLATFORM_MXC_EHCI
693 select IMX_HAVE_PLATFORM_MXC_NAND
694 select IMX_HAVE_PLATFORM_SPI_IMX
695 help
696 Include support for Eukrea CPUIMX51 platform. This includes
697 specific configurations for the module and its peripherals.
698
699choice
700 prompt "Baseboard"
701 depends on MACH_EUKREA_CPUIMX51
702 default MACH_EUKREA_MBIMX51_BASEBOARD
703
704config MACH_EUKREA_MBIMX51_BASEBOARD
705 prompt "Eukrea MBIMX51 development board"
706 bool
707 select IMX_HAVE_PLATFORM_IMX_KEYPAD
708 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
709 select LEDS_GPIO_REGISTER
710 help
711 This adds board specific devices that can be found on Eukrea's
712 MBIMX51 evaluation board.
713
714endchoice
715
716config MACH_EUKREA_CPUIMX51SD
717 bool "Support Eukrea CPUIMX51SD module"
718 select SOC_IMX51
719 select IMX_HAVE_PLATFORM_FSL_USB2_UDC
720 select IMX_HAVE_PLATFORM_IMX_I2C
721 select IMX_HAVE_PLATFORM_IMX_UART
722 select IMX_HAVE_PLATFORM_MXC_EHCI
723 select IMX_HAVE_PLATFORM_MXC_NAND
724 select IMX_HAVE_PLATFORM_SPI_IMX
725 help
726 Include support for Eukrea CPUIMX51SD platform. This includes
727 specific configurations for the module and its peripherals.
728
729choice
730 prompt "Baseboard"
731 depends on MACH_EUKREA_CPUIMX51SD
732 default MACH_EUKREA_MBIMXSD51_BASEBOARD
733
734config MACH_EUKREA_MBIMXSD51_BASEBOARD
735 prompt "Eukrea MBIMXSD development board"
736 bool
737 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
738 select LEDS_GPIO_REGISTER
739 help
740 This adds board specific devices that can be found on Eukrea's
741 MBIMXSD evaluation board.
742
743endchoice
744
745config MX51_EFIKA_COMMON
746 bool
747 select SOC_IMX51
748 select IMX_HAVE_PLATFORM_IMX_UART
749 select IMX_HAVE_PLATFORM_MXC_EHCI
750 select IMX_HAVE_PLATFORM_PATA_IMX
751 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
752 select IMX_HAVE_PLATFORM_SPI_IMX
753 select MXC_ULPI if USB_ULPI
754
755config MACH_MX51_EFIKAMX
756 bool "Support MX51 Genesi Efika MX nettop"
757 select LEDS_GPIO_REGISTER
758 select MX51_EFIKA_COMMON
759 help
760 Include support for Genesi Efika MX nettop. This includes specific
761 configurations for the board and its peripherals.
762
763config MACH_MX51_EFIKASB
764 bool "Support MX51 Genesi Efika Smartbook"
765 select LEDS_GPIO_REGISTER
766 select MX51_EFIKA_COMMON
767 help
768 Include support for Genesi Efika Smartbook. This includes specific
769 configurations for the board and its peripherals.
770
771comment "i.MX53 machines:"
772
773config MACH_IMX53_DT
774 bool "Support i.MX53 platforms from device tree"
775 select SOC_IMX53
776 select USE_OF
777 select MACH_MX53_ARD
778 select MACH_MX53_EVK
779 select MACH_MX53_LOCO
780 select MACH_MX53_SMD
781 help
782 Include support for Freescale i.MX53 based platforms
783 using the device tree for discovery
784
785config MACH_MX53_EVK
786 bool "Support MX53 EVK platforms"
787 select SOC_IMX53
788 select IMX_HAVE_PLATFORM_IMX2_WDT
789 select IMX_HAVE_PLATFORM_IMX_UART
790 select IMX_HAVE_PLATFORM_IMX_I2C
791 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
792 select IMX_HAVE_PLATFORM_SPI_IMX
793 select LEDS_GPIO_REGISTER
794 help
795 Include support for MX53 EVK platform. This includes specific
796 configurations for the board and its peripherals.
797
798config MACH_MX53_SMD
799 bool "Support MX53 SMD platforms"
800 select SOC_IMX53
801 select IMX_HAVE_PLATFORM_IMX2_WDT
802 select IMX_HAVE_PLATFORM_IMX_I2C
803 select IMX_HAVE_PLATFORM_IMX_UART
804 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
805 help
806 Include support for MX53 SMD platform. This includes specific
807 configurations for the board and its peripherals.
808
809config MACH_MX53_LOCO
810 bool "Support MX53 LOCO platforms"
811 select SOC_IMX53
812 select IMX_HAVE_PLATFORM_IMX2_WDT
813 select IMX_HAVE_PLATFORM_IMX_I2C
814 select IMX_HAVE_PLATFORM_IMX_UART
815 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
816 select IMX_HAVE_PLATFORM_GPIO_KEYS
817 select LEDS_GPIO_REGISTER
818 help
819 Include support for MX53 LOCO platform. This includes specific
820 configurations for the board and its peripherals.
821
822config MACH_MX53_ARD
823 bool "Support MX53 ARD platforms"
824 select SOC_IMX53
825 select IMX_HAVE_PLATFORM_IMX2_WDT
826 select IMX_HAVE_PLATFORM_IMX_I2C
827 select IMX_HAVE_PLATFORM_IMX_UART
828 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
829 select IMX_HAVE_PLATFORM_GPIO_KEYS
830 help
831 Include support for MX53 ARD platform. This includes specific
832 configurations for the board and its peripherals.
833
595comment "i.MX6 family:" 834comment "i.MX6 family:"
596 835
597config SOC_IMX6Q 836config SOC_IMX6Q
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index f5920c24f7d7..55db9c488f2b 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
11obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o 11obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o
12obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o 12obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o
13 13
14obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
15
14# Support for CMOS sensor interface 16# Support for CMOS sensor interface
15obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o 17obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
16 18
@@ -75,3 +77,22 @@ obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
75ifeq ($(CONFIG_PM),y) 77ifeq ($(CONFIG_PM),y)
76obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o 78obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
77endif 79endif
80
81# i.MX5 based machines
82obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
83obj-$(CONFIG_MACH_MX51_3DS) += mach-mx51_3ds.o
84obj-$(CONFIG_MACH_MX53_EVK) += mach-mx53_evk.o
85obj-$(CONFIG_MACH_MX53_SMD) += mach-mx53_smd.o
86obj-$(CONFIG_MACH_MX53_LOCO) += mach-mx53_loco.o
87obj-$(CONFIG_MACH_MX53_ARD) += mach-mx53_ard.o
88obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += mach-cpuimx51.o
89obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
90obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += mach-cpuimx51sd.o
91obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
92obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
93obj-$(CONFIG_MACH_MX51_EFIKAMX) += mach-mx51_efikamx.o
94obj-$(CONFIG_MACH_MX51_EFIKASB) += mach-mx51_efikasb.o
95obj-$(CONFIG_MACH_MX50_RDP) += mach-mx50_rdp.o
96
97obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
98obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 5f4d06af4912..6dfdbcc83afd 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -22,6 +22,18 @@ zreladdr-$(CONFIG_SOC_IMX35) += 0x80008000
22params_phys-$(CONFIG_SOC_IMX35) := 0x80000100 22params_phys-$(CONFIG_SOC_IMX35) := 0x80000100
23initrd_phys-$(CONFIG_SOC_IMX35) := 0x80800000 23initrd_phys-$(CONFIG_SOC_IMX35) := 0x80800000
24 24
25zreladdr-$(CONFIG_SOC_IMX50) += 0x70008000
26params_phys-$(CONFIG_SOC_IMX50) := 0x70000100
27initrd_phys-$(CONFIG_SOC_IMX50) := 0x70800000
28
29zreladdr-$(CONFIG_SOC_IMX51) += 0x90008000
30params_phys-$(CONFIG_SOC_IMX51) := 0x90000100
31initrd_phys-$(CONFIG_SOC_IMX51) := 0x90800000
32
33zreladdr-$(CONFIG_SOC_IMX53) += 0x70008000
34params_phys-$(CONFIG_SOC_IMX53) := 0x70000100
35initrd_phys-$(CONFIG_SOC_IMX53) := 0x70800000
36
25zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000 37zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000
26params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100 38params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100
27initrd_phys-$(CONFIG_SOC_IMX6Q) := 0x10800000 39initrd_phys-$(CONFIG_SOC_IMX6Q) := 0x10800000
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-imx/clock-mx51-mx53.c
index 4cb276977190..08470504a088 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-imx/clock-mx51-mx53.c
@@ -23,7 +23,7 @@
23#include <mach/common.h> 23#include <mach/common.h>
24#include <mach/clock.h> 24#include <mach/clock.h>
25 25
26#include "crm_regs.h" 26#include "crm-regs-imx5.h"
27 27
28/* External clock values passed-in by the board code */ 28/* External clock values passed-in by the board code */
29static unsigned long external_high_reference, external_low_reference; 29static unsigned long external_high_reference, external_low_reference;
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-imx/cpu-imx5.c
index 5e2e7a843860..5e2e7a843860 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
diff --git a/arch/arm/mach-mx5/cpu_op-mx51.c b/arch/arm/mach-imx/cpu_op-mx51.c
index 9d34c3d4c024..9d34c3d4c024 100644
--- a/arch/arm/mach-mx5/cpu_op-mx51.c
+++ b/arch/arm/mach-imx/cpu_op-mx51.c
diff --git a/arch/arm/mach-mx5/cpu_op-mx51.h b/arch/arm/mach-imx/cpu_op-mx51.h
index 97477fecb469..97477fecb469 100644
--- a/arch/arm/mach-mx5/cpu_op-mx51.h
+++ b/arch/arm/mach-imx/cpu_op-mx51.h
diff --git a/arch/arm/mach-mx5/crm_regs.h b/arch/arm/mach-imx/crm-regs-imx5.h
index 5e11ba7daee2..5e11ba7daee2 100644
--- a/arch/arm/mach-mx5/crm_regs.h
+++ b/arch/arm/mach-imx/crm-regs-imx5.h
diff --git a/arch/arm/mach-mx5/devices-imx50.h b/arch/arm/mach-imx/devices-imx50.h
index 7216667eaafc..7216667eaafc 100644
--- a/arch/arm/mach-mx5/devices-imx50.h
+++ b/arch/arm/mach-imx/devices-imx50.h
diff --git a/arch/arm/mach-mx5/devices-imx51.h b/arch/arm/mach-imx/devices-imx51.h
index af488bc0e225..af488bc0e225 100644
--- a/arch/arm/mach-mx5/devices-imx51.h
+++ b/arch/arm/mach-imx/devices-imx51.h
diff --git a/arch/arm/mach-mx5/devices-imx53.h b/arch/arm/mach-imx/devices-imx53.h
index 6e1e5d1f8c3a..6e1e5d1f8c3a 100644
--- a/arch/arm/mach-mx5/devices-imx53.h
+++ b/arch/arm/mach-imx/devices-imx53.h
diff --git a/arch/arm/mach-mx5/efika.h b/arch/arm/mach-imx/efika.h
index 014aa985faae..014aa985faae 100644
--- a/arch/arm/mach-mx5/efika.h
+++ b/arch/arm/mach-imx/efika.h
diff --git a/arch/arm/mach-mx5/ehci.c b/arch/arm/mach-imx/ehci-imx5.c
index c17fa131728b..c17fa131728b 100644
--- a/arch/arm/mach-mx5/ehci.c
+++ b/arch/arm/mach-imx/ehci-imx5.c
diff --git a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx51-baseboard.c
index a6a3ab8f1b1c..a6a3ab8f1b1c 100644
--- a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx51-baseboard.c
diff --git a/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c
index d817fc80b986..d817fc80b986 100644
--- a/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index e6bad17b908c..e6bad17b908c 100644
--- a/arch/arm/mach-mx5/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
index 05ebb3e68679..05ebb3e68679 100644
--- a/arch/arm/mach-mx5/imx53-dt.c
+++ b/arch/arm/mach-imx/imx53-dt.c
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-imx/mach-cpuimx51.c
index 944025da8333..944025da8333 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-imx/mach-cpuimx51.c
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index 9fbe923c8b08..9fbe923c8b08 100644
--- a/arch/arm/mach-mx5/board-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-imx/mach-mx50_rdp.c
index 42b66e8d9615..42b66e8d9615 100644
--- a/arch/arm/mach-mx5/board-mx50_rdp.c
+++ b/arch/arm/mach-imx/mach-mx50_rdp.c
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-imx/mach-mx51_3ds.c
index 83eab4176ca4..83eab4176ca4 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-imx/mach-mx51_3ds.c
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-imx/mach-mx51_babbage.c
index e4b822e9f719..e4b822e9f719 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-imx/mach-mx51_babbage.c
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-imx/mach-mx51_efikamx.c
index 3a5ed2dd885a..3a5ed2dd885a 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-imx/mach-mx51_efikamx.c
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-imx/mach-mx51_efikasb.c
index ea5f65b0381a..ea5f65b0381a 100644
--- a/arch/arm/mach-mx5/board-mx51_efikasb.c
+++ b/arch/arm/mach-imx/mach-mx51_efikasb.c
diff --git a/arch/arm/mach-mx5/board-mx53_ard.c b/arch/arm/mach-imx/mach-mx53_ard.c
index 5f224f1c3eb6..08dfb7628d2d 100644
--- a/arch/arm/mach-mx5/board-mx53_ard.c
+++ b/arch/arm/mach-imx/mach-mx53_ard.c
@@ -32,7 +32,6 @@
32#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
33#include <asm/mach/time.h> 33#include <asm/mach/time.h>
34 34
35#include "crm_regs.h"
36#include "devices-imx53.h" 35#include "devices-imx53.h"
37 36
38#define ARD_ETHERNET_INT_B IMX_GPIO_NR(2, 31) 37#define ARD_ETHERNET_INT_B IMX_GPIO_NR(2, 31)
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-imx/mach-mx53_evk.c
index d6ce137896d6..5a72188b9cdb 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-imx/mach-mx53_evk.c
@@ -37,7 +37,6 @@
37#define EVK_ECSPI1_CS1 IMX_GPIO_NR(3, 19) 37#define EVK_ECSPI1_CS1 IMX_GPIO_NR(3, 19)
38#define MX53EVK_LED IMX_GPIO_NR(7, 7) 38#define MX53EVK_LED IMX_GPIO_NR(7, 7)
39 39
40#include "crm_regs.h"
41#include "devices-imx53.h" 40#include "devices-imx53.h"
42 41
43static iomux_v3_cfg_t mx53_evk_pads[] = { 42static iomux_v3_cfg_t mx53_evk_pads[] = {
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-imx/mach-mx53_loco.c
index fd8b524e1c58..37f67cac15a4 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-imx/mach-mx53_loco.c
@@ -32,7 +32,6 @@
32#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
33#include <asm/mach/time.h> 33#include <asm/mach/time.h>
34 34
35#include "crm_regs.h"
36#include "devices-imx53.h" 35#include "devices-imx53.h"
37 36
38#define MX53_LOCO_POWER IMX_GPIO_NR(1, 8) 37#define MX53_LOCO_POWER IMX_GPIO_NR(1, 8)
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-imx/mach-mx53_smd.c
index 22c53c9b18aa..8e972c5c3e13 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-imx/mach-mx53_smd.c
@@ -31,7 +31,6 @@
31#include <asm/mach/arch.h> 31#include <asm/mach/arch.h>
32#include <asm/mach/time.h> 32#include <asm/mach/time.h>
33 33
34#include "crm_regs.h"
35#include "devices-imx53.h" 34#include "devices-imx53.h"
36 35
37#define SMD_FEC_PHY_RST IMX_GPIO_NR(7, 6) 36#define SMD_FEC_PHY_RST IMX_GPIO_NR(7, 6)
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-imx/mm-imx5.c
index bc17dfea3817..bc17dfea3817 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-imx/mm-imx5.c
diff --git a/arch/arm/mach-mx5/mx51_efika.c b/arch/arm/mach-imx/mx51_efika.c
index ec6ca91b299b..ec6ca91b299b 100644
--- a/arch/arm/mach-mx5/mx51_efika.c
+++ b/arch/arm/mach-imx/mx51_efika.c
diff --git a/arch/arm/mach-mx5/system.c b/arch/arm/mach-imx/pm-imx5.c
index 5eebfaad1226..6dc093448057 100644
--- a/arch/arm/mach-mx5/system.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. 2 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 */ 3 *
4
5/*
6 * The code contained herein is licensed under the GNU General Public 4 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 5 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations: 6 * Version 2 or later at the following locations:
@@ -10,14 +8,22 @@
10 * http://www.opensource.org/licenses/gpl-license.html 8 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html 9 * http://www.gnu.org/copyleft/gpl.html
12 */ 10 */
13#include <linux/platform_device.h> 11#include <linux/suspend.h>
12#include <linux/clk.h>
14#include <linux/io.h> 13#include <linux/io.h>
15#include <mach/hardware.h> 14#include <linux/err.h>
15#include <asm/cacheflush.h>
16#include <asm/tlbflush.h>
16#include <mach/common.h> 17#include <mach/common.h>
17#include "crm_regs.h" 18#include <mach/hardware.h>
19#include "crm-regs-imx5.h"
20
21static struct clk *gpc_dvfs_clk;
18 22
19/* set cpu low power mode before WFI instruction. This function is called 23/*
20 * mx5 because it can be used for mx50, mx51, and mx53.*/ 24 * set cpu low power mode before WFI instruction. This function is called
25 * mx5 because it can be used for mx50, mx51, and mx53.
26 */
21void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode) 27void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
22{ 28{
23 u32 plat_lpc, arm_srpgcr, ccm_clpcr; 29 u32 plat_lpc, arm_srpgcr, ccm_clpcr;
@@ -80,3 +86,68 @@ void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
80 __raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR); 86 __raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR);
81 } 87 }
82} 88}
89
90static int mx5_suspend_prepare(void)
91{
92 return clk_enable(gpc_dvfs_clk);
93}
94
95static int mx5_suspend_enter(suspend_state_t state)
96{
97 switch (state) {
98 case PM_SUSPEND_MEM:
99 mx5_cpu_lp_set(STOP_POWER_OFF);
100 break;
101 case PM_SUSPEND_STANDBY:
102 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
103 break;
104 default:
105 return -EINVAL;
106 }
107
108 if (state == PM_SUSPEND_MEM) {
109 local_flush_tlb_all();
110 flush_cache_all();
111
112 /*clear the EMPGC0/1 bits */
113 __raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
114 __raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
115 }
116 cpu_do_idle();
117 return 0;
118}
119
120static void mx5_suspend_finish(void)
121{
122 clk_disable(gpc_dvfs_clk);
123}
124
125static int mx5_pm_valid(suspend_state_t state)
126{
127 return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
128}
129
130static const struct platform_suspend_ops mx5_suspend_ops = {
131 .valid = mx5_pm_valid,
132 .prepare = mx5_suspend_prepare,
133 .enter = mx5_suspend_enter,
134 .finish = mx5_suspend_finish,
135};
136
137static int __init mx5_pm_init(void)
138{
139 if (!cpu_is_mx51() && !cpu_is_mx53())
140 return 0;
141
142 if (gpc_dvfs_clk == NULL)
143 gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
144
145 if (!IS_ERR(gpc_dvfs_clk)) {
146 if (cpu_is_mx51())
147 suspend_set_ops(&mx5_suspend_ops);
148 } else
149 return -EPERM;
150
151 return 0;
152}
153device_initcall(mx5_pm_init);
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
deleted file mode 100644
index af0c212e3c7b..000000000000
--- a/arch/arm/mach-mx5/Kconfig
+++ /dev/null
@@ -1,244 +0,0 @@
1if ARCH_MX5
2
3# ARCH_MX5/50/53 are left to mark places where prevent multi-soc in single
4# image. So for most time, SOC_IMX50/51/53 should be used.
5
6config ARCH_MX51
7 bool
8
9config ARCH_MX50
10 bool
11
12config ARCH_MX53
13 bool
14
15config SOC_IMX50
16 bool
17 select CPU_V7
18 select ARM_L1_CACHE_SHIFT_6
19 select MXC_TZIC
20 select ARCH_MXC_IOMUX_V3
21 select ARCH_MXC_AUDMUX_V2
22 select ARCH_HAS_CPUFREQ
23 select ARCH_MX50
24
25config SOC_IMX51
26 bool
27 select CPU_V7
28 select ARM_L1_CACHE_SHIFT_6
29 select MXC_TZIC
30 select ARCH_MXC_IOMUX_V3
31 select ARCH_MXC_AUDMUX_V2
32 select ARCH_HAS_CPUFREQ
33 select ARCH_MX51
34
35config SOC_IMX53
36 bool
37 select CPU_V7
38 select ARM_L1_CACHE_SHIFT_6
39 select MXC_TZIC
40 select ARCH_MXC_IOMUX_V3
41 select ARCH_MX53
42
43#comment "i.MX50 machines:"
44
45config MACH_MX50_RDP
46 bool "Support MX50 reference design platform"
47 depends on BROKEN
48 select SOC_IMX50
49 select IMX_HAVE_PLATFORM_IMX_I2C
50 select IMX_HAVE_PLATFORM_IMX_UART
51 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
52 select IMX_HAVE_PLATFORM_SPI_IMX
53 help
54 Include support for MX50 reference design platform (RDP) board. This
55 includes specific configurations for the board and its peripherals.
56
57comment "i.MX51 machines:"
58
59config MACH_IMX51_DT
60 bool "Support i.MX51 platforms from device tree"
61 select SOC_IMX51
62 select USE_OF
63 select MACH_MX51_BABBAGE
64 help
65 Include support for Freescale i.MX51 based platforms
66 using the device tree for discovery
67
68config MACH_MX51_BABBAGE
69 bool "Support MX51 BABBAGE platforms"
70 select SOC_IMX51
71 select IMX_HAVE_PLATFORM_FSL_USB2_UDC
72 select IMX_HAVE_PLATFORM_IMX2_WDT
73 select IMX_HAVE_PLATFORM_IMX_I2C
74 select IMX_HAVE_PLATFORM_IMX_UART
75 select IMX_HAVE_PLATFORM_MXC_EHCI
76 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
77 select IMX_HAVE_PLATFORM_SPI_IMX
78 help
79 Include support for MX51 Babbage platform, also known as MX51EVK in
80 u-boot. This includes specific configurations for the board and its
81 peripherals.
82
83config MACH_MX51_3DS
84 bool "Support MX51PDK (3DS)"
85 select SOC_IMX51
86 select IMX_HAVE_PLATFORM_IMX2_WDT
87 select IMX_HAVE_PLATFORM_IMX_KEYPAD
88 select IMX_HAVE_PLATFORM_IMX_UART
89 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
90 select IMX_HAVE_PLATFORM_SPI_IMX
91 select MXC_DEBUG_BOARD
92 help
93 Include support for MX51PDK (3DS) platform. This includes specific
94 configurations for the board and its peripherals.
95
96config MACH_EUKREA_CPUIMX51
97 bool "Support Eukrea CPUIMX51 module"
98 select SOC_IMX51
99 select IMX_HAVE_PLATFORM_FSL_USB2_UDC
100 select IMX_HAVE_PLATFORM_IMX_I2C
101 select IMX_HAVE_PLATFORM_IMX_UART
102 select IMX_HAVE_PLATFORM_MXC_EHCI
103 select IMX_HAVE_PLATFORM_MXC_NAND
104 select IMX_HAVE_PLATFORM_SPI_IMX
105 help
106 Include support for Eukrea CPUIMX51 platform. This includes
107 specific configurations for the module and its peripherals.
108
109choice
110 prompt "Baseboard"
111 depends on MACH_EUKREA_CPUIMX51
112 default MACH_EUKREA_MBIMX51_BASEBOARD
113
114config MACH_EUKREA_MBIMX51_BASEBOARD
115 prompt "Eukrea MBIMX51 development board"
116 bool
117 select IMX_HAVE_PLATFORM_IMX_KEYPAD
118 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
119 select LEDS_GPIO_REGISTER
120 help
121 This adds board specific devices that can be found on Eukrea's
122 MBIMX51 evaluation board.
123
124endchoice
125
126config MACH_EUKREA_CPUIMX51SD
127 bool "Support Eukrea CPUIMX51SD module"
128 select SOC_IMX51
129 select IMX_HAVE_PLATFORM_FSL_USB2_UDC
130 select IMX_HAVE_PLATFORM_IMX_I2C
131 select IMX_HAVE_PLATFORM_IMX_UART
132 select IMX_HAVE_PLATFORM_MXC_EHCI
133 select IMX_HAVE_PLATFORM_MXC_NAND
134 select IMX_HAVE_PLATFORM_SPI_IMX
135 help
136 Include support for Eukrea CPUIMX51SD platform. This includes
137 specific configurations for the module and its peripherals.
138
139choice
140 prompt "Baseboard"
141 depends on MACH_EUKREA_CPUIMX51SD
142 default MACH_EUKREA_MBIMXSD51_BASEBOARD
143
144config MACH_EUKREA_MBIMXSD51_BASEBOARD
145 prompt "Eukrea MBIMXSD development board"
146 bool
147 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
148 select LEDS_GPIO_REGISTER
149 help
150 This adds board specific devices that can be found on Eukrea's
151 MBIMXSD evaluation board.
152
153endchoice
154
155config MX51_EFIKA_COMMON
156 bool
157 select SOC_IMX51
158 select IMX_HAVE_PLATFORM_IMX_UART
159 select IMX_HAVE_PLATFORM_MXC_EHCI
160 select IMX_HAVE_PLATFORM_PATA_IMX
161 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
162 select IMX_HAVE_PLATFORM_SPI_IMX
163 select MXC_ULPI if USB_ULPI
164
165config MACH_MX51_EFIKAMX
166 bool "Support MX51 Genesi Efika MX nettop"
167 select LEDS_GPIO_REGISTER
168 select MX51_EFIKA_COMMON
169 help
170 Include support for Genesi Efika MX nettop. This includes specific
171 configurations for the board and its peripherals.
172
173config MACH_MX51_EFIKASB
174 bool "Support MX51 Genesi Efika Smartbook"
175 select LEDS_GPIO_REGISTER
176 select MX51_EFIKA_COMMON
177 help
178 Include support for Genesi Efika Smartbook. This includes specific
179 configurations for the board and its peripherals.
180
181comment "i.MX53 machines:"
182
183config MACH_IMX53_DT
184 bool "Support i.MX53 platforms from device tree"
185 select SOC_IMX53
186 select USE_OF
187 select MACH_MX53_ARD
188 select MACH_MX53_EVK
189 select MACH_MX53_LOCO
190 select MACH_MX53_SMD
191 help
192 Include support for Freescale i.MX53 based platforms
193 using the device tree for discovery
194
195config MACH_MX53_EVK
196 bool "Support MX53 EVK platforms"
197 select SOC_IMX53
198 select IMX_HAVE_PLATFORM_IMX2_WDT
199 select IMX_HAVE_PLATFORM_IMX_UART
200 select IMX_HAVE_PLATFORM_IMX_I2C
201 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
202 select IMX_HAVE_PLATFORM_SPI_IMX
203 select LEDS_GPIO_REGISTER
204 help
205 Include support for MX53 EVK platform. This includes specific
206 configurations for the board and its peripherals.
207
208config MACH_MX53_SMD
209 bool "Support MX53 SMD platforms"
210 select SOC_IMX53
211 select IMX_HAVE_PLATFORM_IMX2_WDT
212 select IMX_HAVE_PLATFORM_IMX_I2C
213 select IMX_HAVE_PLATFORM_IMX_UART
214 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
215 help
216 Include support for MX53 SMD platform. This includes specific
217 configurations for the board and its peripherals.
218
219config MACH_MX53_LOCO
220 bool "Support MX53 LOCO platforms"
221 select SOC_IMX53
222 select IMX_HAVE_PLATFORM_IMX2_WDT
223 select IMX_HAVE_PLATFORM_IMX_I2C
224 select IMX_HAVE_PLATFORM_IMX_UART
225 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
226 select IMX_HAVE_PLATFORM_GPIO_KEYS
227 select LEDS_GPIO_REGISTER
228 help
229 Include support for MX53 LOCO platform. This includes specific
230 configurations for the board and its peripherals.
231
232config MACH_MX53_ARD
233 bool "Support MX53 ARD platforms"
234 select SOC_IMX53
235 select IMX_HAVE_PLATFORM_IMX2_WDT
236 select IMX_HAVE_PLATFORM_IMX_I2C
237 select IMX_HAVE_PLATFORM_IMX_UART
238 select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
239 select IMX_HAVE_PLATFORM_GPIO_KEYS
240 help
241 Include support for MX53 ARD platform. This includes specific
242 configurations for the board and its peripherals.
243
244endif
diff --git a/arch/arm/mach-mx5/Makefile b/arch/arm/mach-mx5/Makefile
deleted file mode 100644
index 0fc60807fa2b..000000000000
--- a/arch/arm/mach-mx5/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
1#
2# Makefile for the linux kernel.
3#
4
5# Object file lists.
6obj-y := cpu.o mm.o clock-mx51-mx53.o ehci.o system.o
7
8obj-$(CONFIG_PM) += pm-imx5.o
9obj-$(CONFIG_CPU_FREQ_IMX) += cpu_op-mx51.o
10obj-$(CONFIG_MACH_MX51_BABBAGE) += board-mx51_babbage.o
11obj-$(CONFIG_MACH_MX51_3DS) += board-mx51_3ds.o
12obj-$(CONFIG_MACH_MX53_EVK) += board-mx53_evk.o
13obj-$(CONFIG_MACH_MX53_SMD) += board-mx53_smd.o
14obj-$(CONFIG_MACH_MX53_LOCO) += board-mx53_loco.o
15obj-$(CONFIG_MACH_MX53_ARD) += board-mx53_ard.o
16obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
17obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
18obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += board-cpuimx51sd.o
19obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
20obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
21obj-$(CONFIG_MACH_MX51_EFIKAMX) += board-mx51_efikamx.o
22obj-$(CONFIG_MACH_MX51_EFIKASB) += board-mx51_efikasb.o
23obj-$(CONFIG_MACH_MX50_RDP) += board-mx50_rdp.o
24
25obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
26obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
diff --git a/arch/arm/mach-mx5/Makefile.boot b/arch/arm/mach-mx5/Makefile.boot
deleted file mode 100644
index ca207ca305ec..000000000000
--- a/arch/arm/mach-mx5/Makefile.boot
+++ /dev/null
@@ -1,9 +0,0 @@
1 zreladdr-$(CONFIG_ARCH_MX50) += 0x70008000
2params_phys-$(CONFIG_ARCH_MX50) := 0x70000100
3initrd_phys-$(CONFIG_ARCH_MX50) := 0x70800000
4 zreladdr-$(CONFIG_ARCH_MX51) += 0x90008000
5params_phys-$(CONFIG_ARCH_MX51) := 0x90000100
6initrd_phys-$(CONFIG_ARCH_MX51) := 0x90800000
7 zreladdr-$(CONFIG_ARCH_MX53) += 0x70008000
8params_phys-$(CONFIG_ARCH_MX53) := 0x70000100
9initrd_phys-$(CONFIG_ARCH_MX53) := 0x70800000
diff --git a/arch/arm/mach-mx5/pm-imx5.c b/arch/arm/mach-mx5/pm-imx5.c
deleted file mode 100644
index 98052fc852c7..000000000000
--- a/arch/arm/mach-mx5/pm-imx5.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11#include <linux/suspend.h>
12#include <linux/clk.h>
13#include <linux/io.h>
14#include <linux/err.h>
15#include <asm/cacheflush.h>
16#include <asm/tlbflush.h>
17#include <mach/common.h>
18#include <mach/hardware.h>
19#include "crm_regs.h"
20
21static struct clk *gpc_dvfs_clk;
22
23static int mx5_suspend_prepare(void)
24{
25 return clk_enable(gpc_dvfs_clk);
26}
27
28static int mx5_suspend_enter(suspend_state_t state)
29{
30 switch (state) {
31 case PM_SUSPEND_MEM:
32 mx5_cpu_lp_set(STOP_POWER_OFF);
33 break;
34 case PM_SUSPEND_STANDBY:
35 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
36 break;
37 default:
38 return -EINVAL;
39 }
40
41 if (state == PM_SUSPEND_MEM) {
42 local_flush_tlb_all();
43 flush_cache_all();
44
45 /*clear the EMPGC0/1 bits */
46 __raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
47 __raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
48 }
49 cpu_do_idle();
50 return 0;
51}
52
53static void mx5_suspend_finish(void)
54{
55 clk_disable(gpc_dvfs_clk);
56}
57
58static int mx5_pm_valid(suspend_state_t state)
59{
60 return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
61}
62
63static const struct platform_suspend_ops mx5_suspend_ops = {
64 .valid = mx5_pm_valid,
65 .prepare = mx5_suspend_prepare,
66 .enter = mx5_suspend_enter,
67 .finish = mx5_suspend_finish,
68};
69
70static int __init mx5_pm_init(void)
71{
72 if (gpc_dvfs_clk == NULL)
73 gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
74
75 if (!IS_ERR(gpc_dvfs_clk)) {
76 if (cpu_is_mx51())
77 suspend_set_ops(&mx5_suspend_ops);
78 } else
79 return -EPERM;
80
81 return 0;
82}
83device_initcall(mx5_pm_init);
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index b30708e28c1d..dcebb1230f7f 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -17,26 +17,17 @@ config ARCH_IMX_V4_V5
17 and ARMv5 SoCs 17 and ARMv5 SoCs
18 18
19config ARCH_IMX_V6_V7 19config ARCH_IMX_V6_V7
20 bool "i.MX3, i.MX6" 20 bool "i.MX3, i.MX5, i.MX6"
21 select AUTO_ZRELADDR if !ZBOOT_ROM 21 select AUTO_ZRELADDR if !ZBOOT_ROM
22 select ARM_PATCH_PHYS_VIRT 22 select ARM_PATCH_PHYS_VIRT
23 select MIGHT_HAVE_CACHE_L2X0 23 select MIGHT_HAVE_CACHE_L2X0
24 help 24 help
25 This enables support for systems based on the Freescale i.MX3 and i.MX6 25 This enables support for systems based on the Freescale i.MX3, i.MX5
26 family. 26 and i.MX6 family.
27
28config ARCH_MX5
29 bool "i.MX50, i.MX51, i.MX53"
30 select AUTO_ZRELADDR if !ZBOOT_ROM
31 select ARM_PATCH_PHYS_VIRT
32 help
33 This enables support for machines using Freescale's i.MX50 and i.MX53
34 processors.
35 27
36endchoice 28endchoice
37 29
38source "arch/arm/mach-imx/Kconfig" 30source "arch/arm/mach-imx/Kconfig"
39source "arch/arm/mach-mx5/Kconfig"
40 31
41endmenu 32endmenu
42 33
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bfb4d01e0e51..5207035dc061 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
429static struct acpi_table_slit __initdata *slit_table; 429static struct acpi_table_slit __initdata *slit_table;
430cpumask_t early_cpu_possible_map = CPU_MASK_NONE; 430cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
431 431
432static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) 432static int __init
433get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
433{ 434{
434 int pxm; 435 int pxm;
435 436
436 pxm = pa->proximity_domain_lo; 437 pxm = pa->proximity_domain_lo;
437 if (ia64_platform_is("sn2")) 438 if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
438 pxm += pa->proximity_domain_hi[0] << 8; 439 pxm += pa->proximity_domain_hi[0] << 8;
439 return pxm; 440 return pxm;
440} 441}
441 442
442static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) 443static int __init
444get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
443{ 445{
444 int pxm; 446 int pxm;
445 447
446 pxm = ma->proximity_domain; 448 pxm = ma->proximity_domain;
447 if (!ia64_platform_is("sn2")) 449 if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
448 pxm &= 0xff; 450 pxm &= 0xff;
449 451
450 return pxm; 452 return pxm;
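
Both hunks above key the extra decoding off the SRAT table revision: beginning with revision 2 the proximity domain is a full 32-bit value, so the high bytes of the CPU affinity entry are significant (previously only the sn2 platform used them) and the 8-bit mask is correct only for older tables. A hedged sketch of the CPU-side decode, assuming the usual ACPI layout of one low byte plus three high bytes:

    #include <linux/acpi.h>

    /* Illustrative only: build the 32-bit proximity domain from an SRAT CPU entry. */
    static u32 pxm_from_cpu_affinity(const struct acpi_srat_cpu_affinity *pa,
                                     int srat_revision)
    {
            u32 pxm = pa->proximity_domain_lo;              /* valid on every revision */

            if (srat_revision >= 2)                         /* rev >= 2: 32-bit domains */
                    pxm |= (u32)pa->proximity_domain_hi[0] << 8 |
                           (u32)pa->proximity_domain_hi[1] << 16 |
                           (u32)pa->proximity_domain_hi[2] << 24;
            return pxm;
    }
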
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index cf4e47b0948c..3f30dac804ea 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -42,6 +42,24 @@
42/* The native architecture */ 42/* The native architecture */
43#define KEXEC_ARCH KEXEC_ARCH_S390 43#define KEXEC_ARCH KEXEC_ARCH_S390
44 44
45/*
46 * Size for s390x ELF notes per CPU
47 *
48 * Seven notes plus zero note at the end: prstatus, fpregset, timer,
49 * tod_cmp, tod_reg, control regs, and prefix
50 */
51#define KEXEC_NOTE_BYTES \
52 (ALIGN(sizeof(struct elf_note), 4) * 8 + \
53 ALIGN(sizeof("CORE"), 4) * 7 + \
54 ALIGN(sizeof(struct elf_prstatus), 4) + \
55 ALIGN(sizeof(elf_fpregset_t), 4) + \
56 ALIGN(sizeof(u64), 4) + \
57 ALIGN(sizeof(u64), 4) + \
58 ALIGN(sizeof(u32), 4) + \
59 ALIGN(sizeof(u64) * 16, 4) + \
60 ALIGN(sizeof(u32), 4) \
61 )
62
45/* Provide a dummy definition to avoid build failures. */ 63/* Provide a dummy definition to avoid build failures. */
46static inline void crash_setup_regs(struct pt_regs *newregs, 64static inline void crash_setup_regs(struct pt_regs *newregs,
47 struct pt_regs *oldregs) { } 65 struct pt_regs *oldregs) { }
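
The new KEXEC_NOTE_BYTES budget is the sum of, per note, one 4-byte-aligned elf_note header plus the padded "CORE" name plus the 4-byte-aligned payload, with an eighth empty header terminating the list: with a 12-byte header and "CORE" padding to 8 bytes that gives 8 * 12 = 96 header bytes and 7 * 8 = 56 name bytes, followed by the payloads (prstatus, fpregset, the timer and TOD words, 16 control registers at 128 bytes, and the prefix). A small helper expressing the per-note arithmetic, as a sketch:

    #include <linux/elf.h>
    #include <linux/kernel.h>       /* ALIGN() */

    /* Illustrative: space taken by one ELF note with 4-byte-aligned name and payload. */
    static inline size_t elf_note_bytes(size_t namesz, size_t descsz)
    {
            return ALIGN(sizeof(struct elf_note), 4) +      /* n_namesz/n_descsz/n_type: 12 bytes */
                   ALIGN(namesz, 4) +                       /* sizeof("CORE") = 5 -> 8 */
                   ALIGN(descsz, 4);                        /* e.g. sizeof(struct elf_prstatus) */
    }
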
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 577abba3fac6..83bb96079c43 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -408,7 +408,7 @@ ENTRY(handle_sys)
408 sw r9, [r0, PT_EPC] 408 sw r9, [r0, PT_EPC]
409 409
410 cmpi.c r27, __NR_syscalls # check syscall number 410 cmpi.c r27, __NR_syscalls # check syscall number
411 bgtu illegal_syscall 411 bgeu illegal_syscall
412 412
413 slli r8, r27, 2 # get syscall routine 413 slli r8, r27, 2 # get syscall routine
414 la r11, sys_call_table 414 la r11, sys_call_table
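
The single-opcode change above fixes an off-by-one in the syscall bounds check: valid numbers run from 0 to __NR_syscalls - 1, so a number equal to __NR_syscalls must also be rejected, which needs branch-if-greater-or-equal (bgeu) rather than branch-if-greater (bgtu). The corrected comparison, written as C for illustration:

    /* C equivalent of the fixed unsigned range check (illustration only). */
    if (syscall_nr >= __NR_syscalls)        /* '>' wrongly admitted nr == __NR_syscalls */
            return -ENOSYS;                 /* the illegal_syscall path */
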
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index 028079065af6..7cab8c08e6d1 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,3 +1,4 @@
1boot/compressed/vmlinux 1boot/compressed/vmlinux
2tools/test_get_len 2tools/test_get_len
3tools/insn_sanity
3 4
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6c14ecd851d0..864cc6e6ac8e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -125,16 +125,6 @@ config HAVE_LATENCYTOP_SUPPORT
125config MMU 125config MMU
126 def_bool y 126 def_bool y
127 127
128config ZONE_DMA
129 bool "DMA memory allocation support" if EXPERT
130 default y
131 help
132 DMA memory allocation support allows devices with less than 32-bit
133 addressing to allocate within the first 16MB of address space.
134 Disable if no such devices will be used.
135
136 If unsure, say Y.
137
138config SBUS 128config SBUS
139 bool 129 bool
140 130
@@ -255,6 +245,16 @@ source "kernel/Kconfig.freezer"
255 245
256menu "Processor type and features" 246menu "Processor type and features"
257 247
248config ZONE_DMA
249 bool "DMA memory allocation support" if EXPERT
250 default y
251 help
252 DMA memory allocation support allows devices with less than 32-bit
253 addressing to allocate within the first 16MB of address space.
254 Disable if no such devices will be used.
255
256 If unsure, say Y.
257
258source "kernel/time/Kconfig" 258source "kernel/time/Kconfig"
259 259
260config SMP 260config SMP
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index b4a3db7ce140..21f77b89e47a 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -7,6 +7,7 @@
7# include <asm/unistd_32.h> 7# include <asm/unistd_32.h>
8# define __ARCH_WANT_IPC_PARSE_VERSION 8# define __ARCH_WANT_IPC_PARSE_VERSION
9# define __ARCH_WANT_STAT64 9# define __ARCH_WANT_STAT64
10# define __ARCH_WANT_SYS_IPC
10# define __ARCH_WANT_SYS_OLD_MMAP 11# define __ARCH_WANT_SYS_OLD_MMAP
11# define __ARCH_WANT_SYS_OLD_SELECT 12# define __ARCH_WANT_SYS_OLD_SELECT
12 13
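
Defining __ARCH_WANT_SYS_IPC keeps the legacy multiplexed ipc(2) entry point on 32-bit x86, where a single syscall fans out to the SysV semaphore, message and shared-memory primitives according to a sub-call number. A simplified, hedged sketch of how such a multiplexer dispatches; the real ipc/syscall.c handles many more calls plus version compatibility:

    #include <linux/ipc.h>          /* SEMOP, SEMGET, MSGGET, SHMGET, ... */
    #include <linux/syscalls.h>     /* sys_semop(), sys_semget(), ... */
    #include <linux/errno.h>

    /* Simplified sys_ipc-style demultiplexer; not the kernel's exact code. */
    static long my_ipc_demux(unsigned int call, int first, unsigned long second,
                             unsigned long third, void __user *ptr)
    {
            switch (call & 0xffff) {                /* low bits select the operation */
            case SEMOP:
                    return sys_semop(first, ptr, second);
            case SEMGET:
                    return sys_semget(first, second, third);
            case MSGGET:
                    return sys_msgget(first, second);
            case SHMGET:
                    return sys_shmget(first, second, third);
            default:
                    return -ENOSYS;
            }
    }
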
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 8e862aaf0d90..becf47b81735 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -65,7 +65,7 @@
65 * UV2: Bit 19 selects between 65 * UV2: Bit 19 selects between
66 * (0): 10 microsecond timebase and 66 * (0): 10 microsecond timebase and
67 * (1): 80 microseconds 67 * (1): 80 microseconds
68 * we're using 655us, similar to UV1: 65 units of 10us 68 * we're using 560us, similar to UV1: 65 units of 10us
69 */ 69 */
70#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL) 70#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
71#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL) 71#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
@@ -167,6 +167,7 @@
167#define FLUSH_RETRY_TIMEOUT 2 167#define FLUSH_RETRY_TIMEOUT 2
168#define FLUSH_GIVEUP 3 168#define FLUSH_GIVEUP 3
169#define FLUSH_COMPLETE 4 169#define FLUSH_COMPLETE 4
170#define FLUSH_RETRY_BUSYBUG 5
170 171
171/* 172/*
172 * tuning the action when the numalink network is extremely delayed 173 * tuning the action when the numalink network is extremely delayed
@@ -235,10 +236,10 @@ struct bau_msg_payload {
235 236
236 237
237/* 238/*
238 * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor) 239 * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
239 * see table 4.2.3.0.1 in broadcast_assist spec. 240 * see table 4.2.3.0.1 in broadcast_assist spec.
240 */ 241 */
241struct bau_msg_header { 242struct uv1_bau_msg_header {
242 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 243 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
243 /* bits 5:0 */ 244 /* bits 5:0 */
244 unsigned int base_dest_nasid:15; /* nasid of the first bit */ 245 unsigned int base_dest_nasid:15; /* nasid of the first bit */
@@ -318,19 +319,87 @@ struct bau_msg_header {
318}; 319};
319 320
320/* 321/*
322 * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
323 * see figure 9-2 of harp_sys.pdf
324 */
325struct uv2_bau_msg_header {
326 unsigned int base_dest_nasid:15; /* nasid of the first bit */
327 /* bits 14:0 */ /* in uvhub map */
328 unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
329 /* bits 19:15 */
330 unsigned int rsvd_1:1; /* must be zero */
331 /* bit 20 */
332 /* Address bits 59:21 */
333 /* bits 25:2 of address (44:21) are payload */
334 /* these next 24 bits become bytes 12-14 of msg */
335 /* bits 28:21 land in byte 12 */
336 unsigned int replied_to:1; /* sent as 0 by the source to
337 byte 12 */
338 /* bit 21 */
339 unsigned int msg_type:3; /* software type of the
340 message */
341 /* bits 24:22 */
342 unsigned int canceled:1; /* message canceled, resource
343 is to be freed*/
344 /* bit 25 */
345 unsigned int payload_1:3; /* not currently used */
346 /* bits 28:26 */
347
348 /* bits 36:29 land in byte 13 */
349 unsigned int payload_2a:3; /* not currently used */
350 unsigned int payload_2b:5; /* not currently used */
351 /* bits 36:29 */
352
353 /* bits 44:37 land in byte 14 */
354 unsigned int payload_3:8; /* not currently used */
355 /* bits 44:37 */
356
357 unsigned int rsvd_2:7; /* reserved */
358 /* bits 51:45 */
359 unsigned int swack_flag:1; /* software acknowledge flag */
360 /* bit 52 */
361 unsigned int rsvd_3a:3; /* must be zero */
362 unsigned int rsvd_3b:8; /* must be zero */
363 unsigned int rsvd_3c:8; /* must be zero */
364 unsigned int rsvd_3d:3; /* must be zero */
365 /* bits 74:53 */
366 unsigned int fairness:3; /* usually zero */
367 /* bits 77:75 */
368
369 unsigned int sequence:16; /* message sequence number */
370 /* bits 93:78 Suppl_A */
371 unsigned int chaining:1; /* next descriptor is part of
372 this activation*/
373 /* bit 94 */
374 unsigned int multilevel:1; /* multi-level multicast
375 format */
376 /* bit 95 */
377 unsigned int rsvd_4:24; /* ordered / source node /
378 source subnode / aging
379 must be zero */
380 /* bits 119:96 */
381 unsigned int command:8; /* message type */
382 /* bits 127:120 */
383};
384
385/*
321 * The activation descriptor: 386 * The activation descriptor:
322 * The format of the message to send, plus all accompanying control 387 * The format of the message to send, plus all accompanying control
323 * Should be 64 bytes 388 * Should be 64 bytes
324 */ 389 */
325struct bau_desc { 390struct bau_desc {
326 struct pnmask distribution; 391 struct pnmask distribution;
327 /* 392 /*
328 * message template, consisting of header and payload: 393 * message template, consisting of header and payload:
329 */ 394 */
330 struct bau_msg_header header; 395 union bau_msg_header {
331 struct bau_msg_payload payload; 396 struct uv1_bau_msg_header uv1_hdr;
397 struct uv2_bau_msg_header uv2_hdr;
398 } header;
399
400 struct bau_msg_payload payload;
332}; 401};
333/* 402/* UV1:
334 * -payload-- ---------header------ 403 * -payload-- ---------header------
335 * bytes 0-11 bits 41-56 bits 58-81 404 * bytes 0-11 bits 41-56 bits 58-81
336 * A B (2) C (3) 405 * A B (2) C (3)
@@ -340,6 +409,16 @@ struct bau_desc {
340 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector) 409 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
341 * ------------payload queue----------- 410 * ------------payload queue-----------
342 */ 411 */
412/* UV2:
413 * -payload-- ---------header------
414 * bytes 0-11 bits 70-78 bits 21-44
415 * A B (2) C (3)
416 *
417 * A/B/C are moved to:
418 * A C B
419 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
420 * ------------payload queue-----------
421 */
343 422
344/* 423/*
345 * The payload queue on the destination side is an array of these. 424 * The payload queue on the destination side is an array of these.
@@ -385,7 +464,6 @@ struct bau_pq_entry {
385struct msg_desc { 464struct msg_desc {
386 struct bau_pq_entry *msg; 465 struct bau_pq_entry *msg;
387 int msg_slot; 466 int msg_slot;
388 int swack_slot;
389 struct bau_pq_entry *queue_first; 467 struct bau_pq_entry *queue_first;
390 struct bau_pq_entry *queue_last; 468 struct bau_pq_entry *queue_last;
391}; 469};
@@ -405,6 +483,7 @@ struct ptc_stats {
405 requests */ 483 requests */
406 unsigned long s_stimeout; /* source side timeouts */ 484 unsigned long s_stimeout; /* source side timeouts */
407 unsigned long s_dtimeout; /* destination side timeouts */ 485 unsigned long s_dtimeout; /* destination side timeouts */
486 unsigned long s_strongnacks; /* number of strong nack's */
408 unsigned long s_time; /* time spent in sending side */ 487 unsigned long s_time; /* time spent in sending side */
409 unsigned long s_retriesok; /* successful retries */ 488 unsigned long s_retriesok; /* successful retries */
410 unsigned long s_ntargcpu; /* total number of cpu's 489 unsigned long s_ntargcpu; /* total number of cpu's
@@ -439,6 +518,9 @@ struct ptc_stats {
439 unsigned long s_retry_messages; /* retry broadcasts */ 518 unsigned long s_retry_messages; /* retry broadcasts */
440 unsigned long s_bau_reenabled; /* for bau enable/disable */ 519 unsigned long s_bau_reenabled; /* for bau enable/disable */
441 unsigned long s_bau_disabled; /* for bau enable/disable */ 520 unsigned long s_bau_disabled; /* for bau enable/disable */
521 unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
522 unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
523 unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
442 /* destination statistics */ 524 /* destination statistics */
443 unsigned long d_alltlb; /* times all tlb's on this 525 unsigned long d_alltlb; /* times all tlb's on this
444 cpu were flushed */ 526 cpu were flushed */
@@ -511,9 +593,12 @@ struct bau_control {
511 short osnode; 593 short osnode;
512 short uvhub_cpu; 594 short uvhub_cpu;
513 short uvhub; 595 short uvhub;
596 short uvhub_version;
514 short cpus_in_socket; 597 short cpus_in_socket;
515 short cpus_in_uvhub; 598 short cpus_in_uvhub;
516 short partition_base_pnode; 599 short partition_base_pnode;
600 short using_desc; /* an index, like uvhub_cpu */
601 unsigned int inuse_map;
517 unsigned short message_number; 602 unsigned short message_number;
518 unsigned short uvhub_quiesce; 603 unsigned short uvhub_quiesce;
519 short socket_acknowledge_count[DEST_Q_SIZE]; 604 short socket_acknowledge_count[DEST_Q_SIZE];
@@ -531,6 +616,7 @@ struct bau_control {
531 int cong_response_us; 616 int cong_response_us;
532 int cong_reps; 617 int cong_reps;
533 int cong_period; 618 int cong_period;
619 unsigned long clocks_per_100_usec;
534 cycles_t period_time; 620 cycles_t period_time;
535 long period_requests; 621 long period_requests;
536 struct hub_and_pnode *thp; 622 struct hub_and_pnode *thp;
@@ -591,6 +677,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
591 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); 677 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
592} 678}
593 679
680static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
681{
682 write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
683}
684
594static inline unsigned long read_mmr_sw_ack(void) 685static inline unsigned long read_mmr_sw_ack(void)
595{ 686{
596 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); 687 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
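
Since UV1 and UV2 lay out the 16-byte message header differently, struct bau_desc now holds both layouts in a union, and a sender writes through whichever view matches the hub generation. A hedged sketch of that selection (a field subset only, mirroring the activation_descriptor_init() change later in this diff):

    #include <asm/uv/uv_hub.h>      /* UV_PNODE_TO_NASID() */
    #include <asm/uv/uv_bau.h>      /* struct bau_desc, UV_LB_SUBNODEID, ... */

    /* Illustrative: pick the UV1 or UV2 view of the shared header union. */
    static void fill_bau_header(struct bau_desc *desc, int uvhub_version,
                                int base_pnode)
    {
            if (uvhub_version == 1) {
                    struct uv1_bau_msg_header *hdr = &desc->header.uv1_hdr;

                    hdr->swack_flag      = 1;
                    hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
                    hdr->dest_subnodeid  = UV_LB_SUBNODEID;
                    hdr->command         = UV_NET_ENDPOINT_INTD;
                    hdr->int_both        = 1;       /* UV1-only field */
            } else {
                    struct uv2_bau_msg_header *hdr = &desc->header.uv2_hdr;

                    hdr->swack_flag      = 1;
                    hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
                    hdr->dest_subnodeid  = UV_LB_SUBNODEID;
                    hdr->command         = UV_NET_ENDPOINT_INTD;
            }
    }
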
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 174d938d576b..62d61e9976eb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -703,7 +703,7 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
703} 703}
704#endif 704#endif
705 705
706#ifdef CONFIG_HIBERNATION 706#ifdef CONFIG_ACPI
707/** 707/**
708 * Mark ACPI NVS memory region, so that we can save/restore it during 708 * Mark ACPI NVS memory region, so that we can save/restore it during
709 * hibernation and the subsequent resume. 709 * hibernation and the subsequent resume.
@@ -716,7 +716,7 @@ static int __init e820_mark_nvs_memory(void)
716 struct e820entry *ei = &e820.map[i]; 716 struct e820entry *ei = &e820.map[i];
717 717
718 if (ei->type == E820_NVS) 718 if (ei->type == E820_NVS)
719 suspend_nvs_register(ei->addr, ei->size); 719 acpi_nvs_register(ei->addr, ei->size);
720 } 720 }
721 721
722 return 0; 722 return 0;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c0dd5b603749..a62c201c97ec 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -290,14 +290,15 @@ static inline int pit_verify_msb(unsigned char val)
290static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap) 290static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
291{ 291{
292 int count; 292 int count;
293 u64 tsc = 0; 293 u64 tsc = 0, prev_tsc = 0;
294 294
295 for (count = 0; count < 50000; count++) { 295 for (count = 0; count < 50000; count++) {
296 if (!pit_verify_msb(val)) 296 if (!pit_verify_msb(val))
297 break; 297 break;
298 prev_tsc = tsc;
298 tsc = get_cycles(); 299 tsc = get_cycles();
299 } 300 }
300 *deltap = get_cycles() - tsc; 301 *deltap = get_cycles() - prev_tsc;
301 *tscp = tsc; 302 *tscp = tsc;
302 303
303 /* 304 /*
@@ -311,9 +312,9 @@ static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *de
311 * How many MSB values do we want to see? We aim for 312 * How many MSB values do we want to see? We aim for
312 * a maximum error rate of 500ppm (in practice the 313 * a maximum error rate of 500ppm (in practice the
313 * real error is much smaller), but refuse to spend 314 * real error is much smaller), but refuse to spend
314 * more than 25ms on it. 315 * more than 50ms on it.
315 */ 316 */
316#define MAX_QUICK_PIT_MS 25 317#define MAX_QUICK_PIT_MS 50
317#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256) 318#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
318 319
319static unsigned long quick_pit_calibrate(void) 320static unsigned long quick_pit_calibrate(void)
@@ -383,15 +384,12 @@ success:
383 * 384 *
384 * As a result, we can depend on there not being 385 * As a result, we can depend on there not being
385 * any odd delays anywhere, and the TSC reads are 386 * any odd delays anywhere, and the TSC reads are
386 * reliable (within the error). We also adjust the 387 * reliable (within the error).
387 * delta to the middle of the error bars, just
388 * because it looks nicer.
389 * 388 *
390 * kHz = ticks / time-in-seconds / 1000; 389 * kHz = ticks / time-in-seconds / 1000;
391 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000 390 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
392 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000) 391 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
393 */ 392 */
394 delta += (long)(d2 - d1)/2;
395 delta *= PIT_TICK_RATE; 393 delta *= PIT_TICK_RATE;
396 do_div(delta, i*256*1000); 394 do_div(delta, i*256*1000);
397 printk("Fast TSC calibration using PIT\n"); 395 printk("Fast TSC calibration using PIT\n");
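
Two things change in the fast PIT calibration above: the error estimate returned through *deltap is now measured against the previous iteration's TSC read (prev_tsc), so the error window spans the interval in which the PIT's MSB actually changed, and the cosmetic shift of delta to the middle of the error bars is dropped; the iteration budget also doubles to 50 ms. The frequency formula itself is unchanged; a worked instance with hypothetical sample values:

    #include <linux/timex.h>        /* PIT_TICK_RATE (1193182 Hz) */

    /* Worked example of kHz = (t2 - t1) * PIT_TICK_RATE / (i * 256 * 1000),
     * using made-up numbers, not measured data. */
    static unsigned long example_pit_khz(void)
    {
            unsigned long long delta = 33000000ULL;  /* hypothetical t2 - t1, TSC cycles */
            unsigned long i = 60;                    /* hypothetical 256-PIT-tick windows */

            /* 60 * 256 / 1193182 Hz is a ~12.9 ms window; result ~2563000 kHz (~2.56 GHz) */
            return delta * PIT_TICK_RATE / (i * 256 * 1000);
    }
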
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 5b83c51c12e0..819137904428 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -219,7 +219,9 @@ ab: STOS/W/D/Q Yv,rAX
219ac: LODS/B AL,Xb 219ac: LODS/B AL,Xb
220ad: LODS/W/D/Q rAX,Xv 220ad: LODS/W/D/Q rAX,Xv
221ae: SCAS/B AL,Yb 221ae: SCAS/B AL,Yb
222af: SCAS/W/D/Q rAX,Xv 222# Note: The May 2011 Intel manual shows Xv for the second parameter of the
223# next instruction but Yv is correct
224af: SCAS/W/D/Q rAX,Yv
223# 0xb0 - 0xbf 225# 0xb0 - 0xbf
224b0: MOV AL/R8L,Ib 226b0: MOV AL/R8L,Ib
225b1: MOV CL/R9L,Ib 227b1: MOV CL/R9L,Ib
@@ -729,8 +731,8 @@ de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
729df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) 731df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
730f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) 732f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
731f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) 733f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
732f3: ANDN Gy,By,Ey (v) 734f2: ANDN Gy,By,Ey (v)
733f4: Grp17 (1A) 735f3: Grp17 (1A)
734f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) 736f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
735f6: MULX By,Gy,rDX,Ey (F2),(v) 737f6: MULX By,Gy,rDX,Ey (F2),(v)
736f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) 738f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index fd61b3fb7341..1c1c4f46a7c1 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -109,6 +109,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
109 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) 109 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
110 return; 110 return;
111 pxm = pa->proximity_domain_lo; 111 pxm = pa->proximity_domain_lo;
112 if (acpi_srat_revision >= 2)
113 pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
112 node = setup_node(pxm); 114 node = setup_node(pxm);
113 if (node < 0) { 115 if (node < 0) {
114 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); 116 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -160,6 +162,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
160 start = ma->base_address; 162 start = ma->base_address;
161 end = start + ma->length; 163 end = start + ma->length;
162 pxm = ma->proximity_domain; 164 pxm = ma->proximity_domain;
165 if (acpi_srat_revision <= 1)
166 pxm &= 0xff;
163 node = setup_node(pxm); 167 node = setup_node(pxm);
164 if (node < 0) { 168 if (node < 0) {
165 printk(KERN_ERR "SRAT: Too many proximity domains.\n"); 169 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
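
This applies the same SRAT-revision rule as the ia64 change earlier in the diff to the x86 parser: the CPU entry's high bytes extend the domain on revision 2 and later, and the memory entry's 32-bit proximity_domain field is truncated to its low byte only for revision 1 tables. The memory-affinity side, as a sketch:

    #include <linux/acpi.h>

    /* Illustrative decode of a memory-affinity proximity domain by SRAT revision. */
    static u32 pxm_from_mem_affinity(const struct acpi_srat_mem_affinity *ma,
                                     int srat_revision)
    {
            u32 pxm = ma->proximity_domain;         /* already a 32-bit field */

            if (srat_revision <= 1)                 /* rev 1: only the low byte is defined */
                    pxm &= 0xff;
            return pxm;
    }
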
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 5b552198f774..9be4cff00a2d 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
157 * clear of the Timeout bit (as well) will free the resource. No reply will 157 * clear of the Timeout bit (as well) will free the resource. No reply will
158 * be sent (the hardware will only do one reply per message). 158 * be sent (the hardware will only do one reply per message).
159 */ 159 */
160static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp) 160static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
161 int do_acknowledge)
161{ 162{
162 unsigned long dw; 163 unsigned long dw;
163 struct bau_pq_entry *msg; 164 struct bau_pq_entry *msg;
164 165
165 msg = mdp->msg; 166 msg = mdp->msg;
166 if (!msg->canceled) { 167 if (!msg->canceled && do_acknowledge) {
167 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec; 168 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
168 write_mmr_sw_ack(dw); 169 write_mmr_sw_ack(dw);
169 } 170 }
@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
212 if (mmr & (msg_res << UV_SW_ACK_NPENDING)) { 213 if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
213 unsigned long mr; 214 unsigned long mr;
214 /* 215 /*
215 * is the resource timed out? 216 * Is the resource timed out?
216 * make everyone ignore the cancelled message. 217 * Make everyone ignore the cancelled message.
217 */ 218 */
218 msg2->canceled = 1; 219 msg2->canceled = 1;
219 stat->d_canceled++; 220 stat->d_canceled++;
@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
231 * Do all the things a cpu should do for a TLB shootdown message. 232 * Do all the things a cpu should do for a TLB shootdown message.
232 * Other cpu's may come here at the same time for this message. 233 * Other cpu's may come here at the same time for this message.
233 */ 234 */
234static void bau_process_message(struct msg_desc *mdp, 235static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
235 struct bau_control *bcp) 236 int do_acknowledge)
236{ 237{
237 short socket_ack_count = 0; 238 short socket_ack_count = 0;
238 short *sp; 239 short *sp;
@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
284 if (msg_ack_count == bcp->cpus_in_uvhub) { 285 if (msg_ack_count == bcp->cpus_in_uvhub) {
285 /* 286 /*
286 * All cpus in uvhub saw it; reply 287 * All cpus in uvhub saw it; reply
288 * (unless we are in the UV2 workaround)
287 */ 289 */
288 reply_to_message(mdp, bcp); 290 reply_to_message(mdp, bcp, do_acknowledge);
289 } 291 }
290 } 292 }
291 293
@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
491/* 493/*
492 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register. 494 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
493 */ 495 */
494static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu) 496static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
495{ 497{
496 unsigned long descriptor_status; 498 unsigned long descriptor_status;
497 unsigned long descriptor_status2; 499 unsigned long descriptor_status2;
498 500
499 descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK); 501 descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
500 descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL; 502 descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
501 descriptor_status = (descriptor_status << 1) | descriptor_status2; 503 descriptor_status = (descriptor_status << 1) | descriptor_status2;
502 return descriptor_status; 504 return descriptor_status;
503} 505}
504 506
507/*
508 * Return whether the status of the descriptor that is normally used for this
509 * cpu (the one indexed by its hub-relative cpu number) is busy.
510 * The status of the original 32 descriptors is always reflected in the 64
511 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
512 * The bit provided by the activation_status_2 register is irrelevant to
513 * the status if it is only being tested for busy or not busy.
514 */
515int normal_busy(struct bau_control *bcp)
516{
517 int cpu = bcp->uvhub_cpu;
518 int mmr_offset;
519 int right_shift;
520
521 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
522 right_shift = cpu * UV_ACT_STATUS_SIZE;
523 return (((((read_lmmr(mmr_offset) >> right_shift) &
524 UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
525}
526
527/*
528 * Entered when a bau descriptor has gone into a permanent busy wait because
529 * of a hardware bug.
530 * Workaround the bug.
531 */
532int handle_uv2_busy(struct bau_control *bcp)
533{
534 int busy_one = bcp->using_desc;
535 int normal = bcp->uvhub_cpu;
536 int selected = -1;
537 int i;
538 unsigned long descriptor_status;
539 unsigned long status;
540 int mmr_offset;
541 struct bau_desc *bau_desc_old;
542 struct bau_desc *bau_desc_new;
543 struct bau_control *hmaster = bcp->uvhub_master;
544 struct ptc_stats *stat = bcp->statp;
545 cycles_t ttm;
546
547 stat->s_uv2_wars++;
548 spin_lock(&hmaster->uvhub_lock);
549 /* try for the original first */
550 if (busy_one != normal) {
551 if (!normal_busy(bcp))
552 selected = normal;
553 }
554 if (selected < 0) {
555 /* can't use the normal, select an alternate */
556 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
557 descriptor_status = read_lmmr(mmr_offset);
558
559 /* scan available descriptors 32-63 */
560 for (i = 0; i < UV_CPUS_PER_AS; i++) {
561 if ((hmaster->inuse_map & (1 << i)) == 0) {
562 status = ((descriptor_status >>
563 (i * UV_ACT_STATUS_SIZE)) &
564 UV_ACT_STATUS_MASK) << 1;
565 if (status != UV2H_DESC_BUSY) {
566 selected = i + UV_CPUS_PER_AS;
567 break;
568 }
569 }
570 }
571 }
572
573 if (busy_one != normal)
574 /* mark the busy alternate as not in-use */
575 hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
576
577 if (selected >= 0) {
578 /* switch to the selected descriptor */
579 if (selected != normal) {
580 /* set the selected alternate as in-use */
581 hmaster->inuse_map |=
582 (1 << (selected - UV_CPUS_PER_AS));
583 if (selected > stat->s_uv2_wars_hw)
584 stat->s_uv2_wars_hw = selected;
585 }
586 bau_desc_old = bcp->descriptor_base;
587 bau_desc_old += (ITEMS_PER_DESC * busy_one);
588 bcp->using_desc = selected;
589 bau_desc_new = bcp->descriptor_base;
590 bau_desc_new += (ITEMS_PER_DESC * selected);
591 *bau_desc_new = *bau_desc_old;
592 } else {
593 /*
594 * All are busy. Wait for the normal one for this cpu to
595 * free up.
596 */
597 stat->s_uv2_war_waits++;
598 spin_unlock(&hmaster->uvhub_lock);
599 ttm = get_cycles();
600 do {
601 cpu_relax();
602 } while (normal_busy(bcp));
603 spin_lock(&hmaster->uvhub_lock);
604 /* switch to the original descriptor */
605 bcp->using_desc = normal;
606 bau_desc_old = bcp->descriptor_base;
607 bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
608 bcp->using_desc = (ITEMS_PER_DESC * normal);
609 bau_desc_new = bcp->descriptor_base;
610 bau_desc_new += (ITEMS_PER_DESC * normal);
611 *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
612 }
613 spin_unlock(&hmaster->uvhub_lock);
614 return FLUSH_RETRY_BUSYBUG;
615}
616
505static int uv2_wait_completion(struct bau_desc *bau_desc, 617static int uv2_wait_completion(struct bau_desc *bau_desc,
506 unsigned long mmr_offset, int right_shift, 618 unsigned long mmr_offset, int right_shift,
507 struct bau_control *bcp, long try) 619 struct bau_control *bcp, long try)
508{ 620{
509 unsigned long descriptor_stat; 621 unsigned long descriptor_stat;
510 cycles_t ttm; 622 cycles_t ttm;
511 int cpu = bcp->uvhub_cpu; 623 int desc = bcp->using_desc;
624 long busy_reps = 0;
512 struct ptc_stats *stat = bcp->statp; 625 struct ptc_stats *stat = bcp->statp;
513 626
514 descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu); 627 descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
515 628
516 /* spin on the status MMR, waiting for it to go idle */ 629 /* spin on the status MMR, waiting for it to go idle */
517 while (descriptor_stat != UV2H_DESC_IDLE) { 630 while (descriptor_stat != UV2H_DESC_IDLE) {
@@ -522,32 +635,35 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
522 * our message and its state will stay IDLE. 635 * our message and its state will stay IDLE.
523 */ 636 */
524 if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) || 637 if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
525 (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
526 (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) { 638 (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
527 stat->s_stimeout++; 639 stat->s_stimeout++;
528 return FLUSH_GIVEUP; 640 return FLUSH_GIVEUP;
641 } else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
642 stat->s_strongnacks++;
643 bcp->conseccompletes = 0;
644 return FLUSH_GIVEUP;
529 } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) { 645 } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
530 stat->s_dtimeout++; 646 stat->s_dtimeout++;
531 ttm = get_cycles();
532 /*
533 * Our retries may be blocked by all destination
534 * swack resources being consumed, and a timeout
535 * pending. In that case hardware returns the
536 * ERROR that looks like a destination timeout.
537 */
538 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
539 bcp->conseccompletes = 0;
540 return FLUSH_RETRY_PLUGGED;
541 }
542 bcp->conseccompletes = 0; 647 bcp->conseccompletes = 0;
543 return FLUSH_RETRY_TIMEOUT; 648 return FLUSH_RETRY_TIMEOUT;
544 } else { 649 } else {
650 busy_reps++;
651 if (busy_reps > 1000000) {
652 /* not to hammer on the clock */
653 busy_reps = 0;
654 ttm = get_cycles();
655 if ((ttm - bcp->send_message) >
656 (bcp->clocks_per_100_usec)) {
657 return handle_uv2_busy(bcp);
658 }
659 }
545 /* 660 /*
546 * descriptor_stat is still BUSY 661 * descriptor_stat is still BUSY
547 */ 662 */
548 cpu_relax(); 663 cpu_relax();
549 } 664 }
550 descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu); 665 descriptor_stat = uv2_read_status(mmr_offset, right_shift,
666 desc);
551 } 667 }
552 bcp->conseccompletes++; 668 bcp->conseccompletes++;
553 return FLUSH_COMPLETE; 669 return FLUSH_COMPLETE;
@@ -563,17 +679,17 @@ static int wait_completion(struct bau_desc *bau_desc,
563{ 679{
564 int right_shift; 680 int right_shift;
565 unsigned long mmr_offset; 681 unsigned long mmr_offset;
566 int cpu = bcp->uvhub_cpu; 682 int desc = bcp->using_desc;
567 683
568 if (cpu < UV_CPUS_PER_AS) { 684 if (desc < UV_CPUS_PER_AS) {
569 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; 685 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
570 right_shift = cpu * UV_ACT_STATUS_SIZE; 686 right_shift = desc * UV_ACT_STATUS_SIZE;
571 } else { 687 } else {
572 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1; 688 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
573 right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE); 689 right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
574 } 690 }
575 691
576 if (is_uv1_hub()) 692 if (bcp->uvhub_version == 1)
577 return uv1_wait_completion(bau_desc, mmr_offset, right_shift, 693 return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
578 bcp, try); 694 bcp, try);
579 else 695 else
@@ -752,19 +868,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
752 * Returns 1 if it gives up entirely and the original cpu mask is to be 868 * Returns 1 if it gives up entirely and the original cpu mask is to be
753 * returned to the kernel. 869 * returned to the kernel.
754 */ 870 */
755int uv_flush_send_and_wait(struct bau_desc *bau_desc, 871int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
756 struct cpumask *flush_mask, struct bau_control *bcp)
757{ 872{
758 int seq_number = 0; 873 int seq_number = 0;
759 int completion_stat = 0; 874 int completion_stat = 0;
875 int uv1 = 0;
760 long try = 0; 876 long try = 0;
761 unsigned long index; 877 unsigned long index;
762 cycles_t time1; 878 cycles_t time1;
763 cycles_t time2; 879 cycles_t time2;
764 struct ptc_stats *stat = bcp->statp; 880 struct ptc_stats *stat = bcp->statp;
765 struct bau_control *hmaster = bcp->uvhub_master; 881 struct bau_control *hmaster = bcp->uvhub_master;
882 struct uv1_bau_msg_header *uv1_hdr = NULL;
883 struct uv2_bau_msg_header *uv2_hdr = NULL;
884 struct bau_desc *bau_desc;
766 885
767 if (is_uv1_hub()) 886 if (bcp->uvhub_version == 1)
768 uv1_throttle(hmaster, stat); 887 uv1_throttle(hmaster, stat);
769 888
770 while (hmaster->uvhub_quiesce) 889 while (hmaster->uvhub_quiesce)
@@ -772,22 +891,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
772 891
773 time1 = get_cycles(); 892 time1 = get_cycles();
774 do { 893 do {
775 if (try == 0) { 894 bau_desc = bcp->descriptor_base;
776 bau_desc->header.msg_type = MSG_REGULAR; 895 bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
896 if (bcp->uvhub_version == 1) {
897 uv1 = 1;
898 uv1_hdr = &bau_desc->header.uv1_hdr;
899 } else
900 uv2_hdr = &bau_desc->header.uv2_hdr;
901 if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
902 if (uv1)
903 uv1_hdr->msg_type = MSG_REGULAR;
904 else
905 uv2_hdr->msg_type = MSG_REGULAR;
777 seq_number = bcp->message_number++; 906 seq_number = bcp->message_number++;
778 } else { 907 } else {
779 bau_desc->header.msg_type = MSG_RETRY; 908 if (uv1)
909 uv1_hdr->msg_type = MSG_RETRY;
910 else
911 uv2_hdr->msg_type = MSG_RETRY;
780 stat->s_retry_messages++; 912 stat->s_retry_messages++;
781 } 913 }
782 914
783 bau_desc->header.sequence = seq_number; 915 if (uv1)
784 index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu; 916 uv1_hdr->sequence = seq_number;
917 else
918 uv2_hdr->sequence = seq_number;
919 index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
785 bcp->send_message = get_cycles(); 920 bcp->send_message = get_cycles();
786 921
787 write_mmr_activation(index); 922 write_mmr_activation(index);
788 923
789 try++; 924 try++;
790 completion_stat = wait_completion(bau_desc, bcp, try); 925 completion_stat = wait_completion(bau_desc, bcp, try);
926 /* UV2: wait_completion() may change the bcp->using_desc */
791 927
792 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat); 928 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
793 929
@@ -798,6 +934,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
798 } 934 }
799 cpu_relax(); 935 cpu_relax();
800 } while ((completion_stat == FLUSH_RETRY_PLUGGED) || 936 } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
937 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
801 (completion_stat == FLUSH_RETRY_TIMEOUT)); 938 (completion_stat == FLUSH_RETRY_TIMEOUT));
802 939
803 time2 = get_cycles(); 940 time2 = get_cycles();
@@ -812,6 +949,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
812 record_send_stats(time1, time2, bcp, stat, completion_stat, try); 949 record_send_stats(time1, time2, bcp, stat, completion_stat, try);
813 950
814 if (completion_stat == FLUSH_GIVEUP) 951 if (completion_stat == FLUSH_GIVEUP)
952 /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
815 return 1; 953 return 1;
816 return 0; 954 return 0;
817} 955}
@@ -967,7 +1105,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
967 stat->s_ntargself++; 1105 stat->s_ntargself++;
968 1106
969 bau_desc = bcp->descriptor_base; 1107 bau_desc = bcp->descriptor_base;
970 bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu; 1108 bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
971 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 1109 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
972 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes)) 1110 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
973 return NULL; 1111 return NULL;
@@ -980,13 +1118,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
980 * uv_flush_send_and_wait returns 0 if all cpu's were messaged, 1118 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
981 * or 1 if it gave up and the original cpumask should be returned. 1119 * or 1 if it gave up and the original cpumask should be returned.
982 */ 1120 */
983 if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp)) 1121 if (!uv_flush_send_and_wait(flush_mask, bcp))
984 return NULL; 1122 return NULL;
985 else 1123 else
986 return cpumask; 1124 return cpumask;
987} 1125}
988 1126
989/* 1127/*
1128 * Search the message queue for any 'other' message with the same software
1129 * acknowledge resource bit vector.
1130 */
1131struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
1132 struct bau_control *bcp, unsigned char swack_vec)
1133{
1134 struct bau_pq_entry *msg_next = msg + 1;
1135
1136 if (msg_next > bcp->queue_last)
1137 msg_next = bcp->queue_first;
1138 while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
1139 if (msg_next->swack_vec == swack_vec)
1140 return msg_next;
1141 msg_next++;
1142 if (msg_next > bcp->queue_last)
1143 msg_next = bcp->queue_first;
1144 }
1145 return NULL;
1146}
1147
1148/*
1149 * UV2 needs to work around a bug in which an arriving message has not
1150 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1151 * Such a message must be ignored.
1152 */
1153void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
1154{
1155 unsigned long mmr_image;
1156 unsigned char swack_vec;
1157 struct bau_pq_entry *msg = mdp->msg;
1158 struct bau_pq_entry *other_msg;
1159
1160 mmr_image = read_mmr_sw_ack();
1161 swack_vec = msg->swack_vec;
1162
1163 if ((swack_vec & mmr_image) == 0) {
1164 /*
1165 * This message was assigned a swack resource, but no
 1166 * reserved acknowledgment is pending.
1167 * The bug has prevented this message from setting the MMR.
1168 * And no other message has used the same sw_ack resource.
1169 * Do the requested shootdown but do not reply to the msg.
1170 * (the 0 means make no acknowledge)
1171 */
1172 bau_process_message(mdp, bcp, 0);
1173 return;
1174 }
1175
1176 /*
1177 * Some message has set the MMR 'pending' bit; it might have been
1178 * another message. Look for that message.
1179 */
1180 other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
1181 if (other_msg) {
1182 /* There is another. Do not ack the current one. */
1183 bau_process_message(mdp, bcp, 0);
1184 /*
1185 * Let the natural processing of that message acknowledge
1186 * it. Don't get the processing of sw_ack's out of order.
1187 */
1188 return;
1189 }
1190
1191 /*
1192 * There is no other message using this sw_ack, so it is safe to
1193 * acknowledge it.
1194 */
1195 bau_process_message(mdp, bcp, 1);
1196
1197 return;
1198}
1199
1200/*
990 * The BAU message interrupt comes here. (registered by set_intr_gate) 1201 * The BAU message interrupt comes here. (registered by set_intr_gate)
991 * See entry_64.S 1202 * See entry_64.S
992 * 1203 *
@@ -1009,6 +1220,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
1009 struct ptc_stats *stat; 1220 struct ptc_stats *stat;
1010 struct msg_desc msgdesc; 1221 struct msg_desc msgdesc;
1011 1222
1223 ack_APIC_irq();
1012 time_start = get_cycles(); 1224 time_start = get_cycles();
1013 1225
1014 bcp = &per_cpu(bau_control, smp_processor_id()); 1226 bcp = &per_cpu(bau_control, smp_processor_id());
@@ -1022,9 +1234,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
1022 count++; 1234 count++;
1023 1235
1024 msgdesc.msg_slot = msg - msgdesc.queue_first; 1236 msgdesc.msg_slot = msg - msgdesc.queue_first;
1025 msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
1026 msgdesc.msg = msg; 1237 msgdesc.msg = msg;
1027 bau_process_message(&msgdesc, bcp); 1238 if (bcp->uvhub_version == 2)
1239 process_uv2_message(&msgdesc, bcp);
1240 else
1241 bau_process_message(&msgdesc, bcp, 1);
1028 1242
1029 msg++; 1243 msg++;
1030 if (msg > msgdesc.queue_last) 1244 if (msg > msgdesc.queue_last)
@@ -1036,8 +1250,6 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
1036 stat->d_nomsg++; 1250 stat->d_nomsg++;
1037 else if (count > 1) 1251 else if (count > 1)
1038 stat->d_multmsg++; 1252 stat->d_multmsg++;
1039
1040 ack_APIC_irq();
1041} 1253}
1042 1254
1043/* 1255/*
@@ -1083,7 +1295,7 @@ static void __init enable_timeouts(void)
1083 */ 1295 */
1084 mmr_image |= (1L << SOFTACK_MSHIFT); 1296 mmr_image |= (1L << SOFTACK_MSHIFT);
1085 if (is_uv2_hub()) { 1297 if (is_uv2_hub()) {
1086 mmr_image |= (1L << UV2_LEG_SHFT); 1298 mmr_image &= ~(1L << UV2_LEG_SHFT);
1087 mmr_image |= (1L << UV2_EXT_SHFT); 1299 mmr_image |= (1L << UV2_EXT_SHFT);
1088 } 1300 }
1089 write_mmr_misc_control(pnode, mmr_image); 1301 write_mmr_misc_control(pnode, mmr_image);
@@ -1136,13 +1348,13 @@ static int ptc_seq_show(struct seq_file *file, void *data)
1136 seq_printf(file, 1348 seq_printf(file,
1137 "remotehub numuvhubs numuvhubs16 numuvhubs8 "); 1349 "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1138 seq_printf(file, 1350 seq_printf(file,
1139 "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok "); 1351 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
1140 seq_printf(file, 1352 seq_printf(file,
1141 "resetp resett giveup sto bz throt swack recv rtime "); 1353 "resetp resett giveup sto bz throt swack recv rtime ");
1142 seq_printf(file, 1354 seq_printf(file,
1143 "all one mult none retry canc nocan reset rcan "); 1355 "all one mult none retry canc nocan reset rcan ");
1144 seq_printf(file, 1356 seq_printf(file,
1145 "disable enable\n"); 1357 "disable enable wars warshw warwaits\n");
1146 } 1358 }
1147 if (cpu < num_possible_cpus() && cpu_online(cpu)) { 1359 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1148 stat = &per_cpu(ptcstats, cpu); 1360 stat = &per_cpu(ptcstats, cpu);
@@ -1154,10 +1366,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
1154 stat->s_ntargremotes, stat->s_ntargcpu, 1366 stat->s_ntargremotes, stat->s_ntargcpu,
1155 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub, 1367 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1156 stat->s_ntarguvhub, stat->s_ntarguvhub16); 1368 stat->s_ntarguvhub, stat->s_ntarguvhub16);
1157 seq_printf(file, "%ld %ld %ld %ld %ld ", 1369 seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
1158 stat->s_ntarguvhub8, stat->s_ntarguvhub4, 1370 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1159 stat->s_ntarguvhub2, stat->s_ntarguvhub1, 1371 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
1160 stat->s_dtimeout); 1372 stat->s_dtimeout, stat->s_strongnacks);
1161 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ", 1373 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1162 stat->s_retry_messages, stat->s_retriesok, 1374 stat->s_retry_messages, stat->s_retriesok,
1163 stat->s_resets_plug, stat->s_resets_timeout, 1375 stat->s_resets_plug, stat->s_resets_timeout,
@@ -1173,8 +1385,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
1173 stat->d_nomsg, stat->d_retries, stat->d_canceled, 1385 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1174 stat->d_nocanceled, stat->d_resets, 1386 stat->d_nocanceled, stat->d_resets,
1175 stat->d_rcanceled); 1387 stat->d_rcanceled);
1176 seq_printf(file, "%ld %ld\n", 1388 seq_printf(file, "%ld %ld %ld %ld %ld\n",
1177 stat->s_bau_disabled, stat->s_bau_reenabled); 1389 stat->s_bau_disabled, stat->s_bau_reenabled,
1390 stat->s_uv2_wars, stat->s_uv2_wars_hw,
1391 stat->s_uv2_war_waits);
1178 } 1392 }
1179 return 0; 1393 return 0;
1180} 1394}
@@ -1432,12 +1646,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
1432{ 1646{
1433 int i; 1647 int i;
1434 int cpu; 1648 int cpu;
1649 int uv1 = 0;
1435 unsigned long gpa; 1650 unsigned long gpa;
1436 unsigned long m; 1651 unsigned long m;
1437 unsigned long n; 1652 unsigned long n;
1438 size_t dsize; 1653 size_t dsize;
1439 struct bau_desc *bau_desc; 1654 struct bau_desc *bau_desc;
1440 struct bau_desc *bd2; 1655 struct bau_desc *bd2;
1656 struct uv1_bau_msg_header *uv1_hdr;
1657 struct uv2_bau_msg_header *uv2_hdr;
1441 struct bau_control *bcp; 1658 struct bau_control *bcp;
1442 1659
1443 /* 1660 /*
@@ -1451,6 +1668,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
1451 gpa = uv_gpa(bau_desc); 1668 gpa = uv_gpa(bau_desc);
1452 n = uv_gpa_to_gnode(gpa); 1669 n = uv_gpa_to_gnode(gpa);
1453 m = uv_gpa_to_offset(gpa); 1670 m = uv_gpa_to_offset(gpa);
1671 if (is_uv1_hub())
1672 uv1 = 1;
1454 1673
1455 /* the 14-bit pnode */ 1674 /* the 14-bit pnode */
1456 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m)); 1675 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
@@ -1461,21 +1680,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
1461 */ 1680 */
1462 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) { 1681 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
1463 memset(bd2, 0, sizeof(struct bau_desc)); 1682 memset(bd2, 0, sizeof(struct bau_desc));
1464 bd2->header.swack_flag = 1; 1683 if (uv1) {
1465 /* 1684 uv1_hdr = &bd2->header.uv1_hdr;
1466 * The base_dest_nasid set in the message header is the nasid 1685 uv1_hdr->swack_flag = 1;
1467 * of the first uvhub in the partition. The bit map will 1686 /*
1468 * indicate destination pnode numbers relative to that base. 1687 * The base_dest_nasid set in the message header
1469 * They may not be consecutive if nasid striding is being used. 1688 * is the nasid of the first uvhub in the partition.
1470 */ 1689 * The bit map will indicate destination pnode numbers
1471 bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); 1690 * relative to that base. They may not be consecutive
1472 bd2->header.dest_subnodeid = UV_LB_SUBNODEID; 1691 * if nasid striding is being used.
1473 bd2->header.command = UV_NET_ENDPOINT_INTD; 1692 */
1474 bd2->header.int_both = 1; 1693 uv1_hdr->base_dest_nasid =
1475 /* 1694 UV_PNODE_TO_NASID(base_pnode);
1476 * all others need to be set to zero: 1695 uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1477 * fairness chaining multilevel count replied_to 1696 uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1478 */ 1697 uv1_hdr->int_both = 1;
1698 /*
1699 * all others need to be set to zero:
1700 * fairness chaining multilevel count replied_to
1701 */
1702 } else {
1703 uv2_hdr = &bd2->header.uv2_hdr;
1704 uv2_hdr->swack_flag = 1;
1705 uv2_hdr->base_dest_nasid =
1706 UV_PNODE_TO_NASID(base_pnode);
1707 uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1708 uv2_hdr->command = UV_NET_ENDPOINT_INTD;
1709 }
1479 } 1710 }
1480 for_each_present_cpu(cpu) { 1711 for_each_present_cpu(cpu) {
1481 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu))) 1712 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1531,6 +1762,7 @@ static void pq_init(int node, int pnode)
1531 write_mmr_payload_first(pnode, pn_first); 1762 write_mmr_payload_first(pnode, pn_first);
1532 write_mmr_payload_tail(pnode, first); 1763 write_mmr_payload_tail(pnode, first);
1533 write_mmr_payload_last(pnode, last); 1764 write_mmr_payload_last(pnode, last);
1765 write_gmmr_sw_ack(pnode, 0xffffUL);
1534 1766
1535 /* in effect, all msg_type's are set to MSG_NOOP */ 1767 /* in effect, all msg_type's are set to MSG_NOOP */
1536 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); 1768 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
@@ -1584,14 +1816,14 @@ static int calculate_destination_timeout(void)
1584 ts_ns = base * mult1 * mult2; 1816 ts_ns = base * mult1 * mult2;
1585 ret = ts_ns / 1000; 1817 ret = ts_ns / 1000;
1586 } else { 1818 } else {
1587 /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */ 1819 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1588 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL); 1820 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
1589 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT; 1821 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1590 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT)) 1822 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1591 mult1 = 80; 1823 base = 80;
1592 else 1824 else
1593 mult1 = 10; 1825 base = 10;
1594 base = mmr_image & UV2_ACK_MASK; 1826 mult1 = mmr_image & UV2_ACK_MASK;
1595 ret = mult1 * base; 1827 ret = mult1 * base;
1596 } 1828 }
1597 return ret; 1829 return ret;
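
On UV2 the destination timeout is now taken from UVH_LB_BAU_MISC_CONTROL, where one bit selects a 10 us or 80 us base period and a small field supplies the multiplier; the base/mult1 swap above makes the variable names match those roles. A worked instance with an assumed multiplier, consistent with the 560 us figure in the uv_bau.h comment earlier in this diff:

    /* Worked example of the UV2 destination-timeout decode (made-up field value). */
    static unsigned long example_uv2_timeout_us(void)
    {
            unsigned long base  = 80;       /* ACK_UNITS select bit set: 80 us base */
            unsigned long mult1 = 7;        /* hypothetical multiplier read from the MMR */

            return mult1 * base;            /* 7 * 80 = 560 us destination timeout */
    }
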
@@ -1618,6 +1850,7 @@ static void __init init_per_cpu_tunables(void)
1618 bcp->cong_response_us = congested_respns_us; 1850 bcp->cong_response_us = congested_respns_us;
1619 bcp->cong_reps = congested_reps; 1851 bcp->cong_reps = congested_reps;
1620 bcp->cong_period = congested_period; 1852 bcp->cong_period = congested_period;
1853 bcp->clocks_per_100_usec = usec_2_cycles(100);
1621 } 1854 }
1622} 1855}
1623 1856
@@ -1728,8 +1961,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1728 bcp->cpus_in_socket = sdp->num_cpus; 1961 bcp->cpus_in_socket = sdp->num_cpus;
1729 bcp->socket_master = *smasterp; 1962 bcp->socket_master = *smasterp;
1730 bcp->uvhub = bdp->uvhub; 1963 bcp->uvhub = bdp->uvhub;
1964 if (is_uv1_hub())
1965 bcp->uvhub_version = 1;
1966 else if (is_uv2_hub())
1967 bcp->uvhub_version = 2;
1968 else {
1969 printk(KERN_EMERG "uvhub version not 1 or 2\n");
1970 return 1;
1971 }
1731 bcp->uvhub_master = *hmasterp; 1972 bcp->uvhub_master = *hmasterp;
1732 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id; 1973 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1974 bcp->using_desc = bcp->uvhub_cpu;
1733 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { 1975 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1734 printk(KERN_EMERG "%d cpus per uvhub invalid\n", 1976 printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1735 bcp->uvhub_cpu); 1977 bcp->uvhub_cpu);
@@ -1845,6 +2087,8 @@ static int __init uv_bau_init(void)
1845 uv_base_pnode = uv_blade_to_pnode(uvhub); 2087 uv_base_pnode = uv_blade_to_pnode(uvhub);
1846 } 2088 }
1847 2089
2090 enable_timeouts();
2091
1848 if (init_per_cpu(nuvhubs, uv_base_pnode)) { 2092 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
1849 nobau = 1; 2093 nobau = 1;
1850 return 0; 2094 return 0;
@@ -1855,7 +2099,6 @@ static int __init uv_bau_init(void)
1855 if (uv_blade_nr_possible_cpus(uvhub)) 2099 if (uv_blade_nr_possible_cpus(uvhub))
1856 init_uvhub(uvhub, vector, uv_base_pnode); 2100 init_uvhub(uvhub, vector, uv_base_pnode);
1857 2101
1858 enable_timeouts();
1859 alloc_intr_gate(vector, uv_bau_message_intr1); 2102 alloc_intr_gate(vector, uv_bau_message_intr1);
1860 2103
1861 for_each_possible_blade(uvhub) { 2104 for_each_possible_blade(uvhub) {
@@ -1867,7 +2110,8 @@ static int __init uv_bau_init(void)
1867 val = 1L << 63; 2110 val = 1L << 63;
1868 write_gmmr_activation(pnode, val); 2111 write_gmmr_activation(pnode, val);
1869 mmr = 1; /* should be 1 to broadcast to both sockets */ 2112 mmr = 1; /* should be 1 to broadcast to both sockets */
1870 write_mmr_data_broadcast(pnode, mmr); 2113 if (!is_uv1_hub())
2114 write_mmr_data_broadcast(pnode, mmr);
1871 } 2115 }
1872 } 2116 }
1873 2117
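
The bulk of this file's change is the UV2 busy/ack workaround: on the send side handle_uv2_busy() migrates a permanently BUSY transmission to one of the spare descriptors (32-63), and on the receive side a message is acknowledged only when its software-ack bit is actually pending in the MMR and no other queued message shares the same swack vector. A condensed sketch of that receive-side decision, written as it would sit inside tlb_uv.c next to the helpers it uses:

    /* Condensed sketch of process_uv2_message()'s acknowledge decision. */
    static void uv2_ack_decision(struct msg_desc *mdp, struct bau_control *bcp)
    {
            struct bau_pq_entry *msg = mdp->msg;
            unsigned long pending = read_mmr_sw_ack();      /* hardware's view of swack bits */

            if (!(msg->swack_vec & pending))
                    bau_process_message(mdp, bcp, 0);       /* hw bug case: flush, but do not ack */
            else if (find_another_by_swack(msg, bcp, msg->swack_vec))
                    bau_process_message(mdp, bcp, 0);       /* another message owns the bit */
            else
                    bau_process_message(mdp, bcp, 1);       /* safe to acknowledge */
    }
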
diff --git a/arch/x86/um/shared/sysdep/ptrace.h b/arch/x86/um/shared/sysdep/ptrace.h
index 5ef9344a8b24..2bbe1ec2d96a 100644
--- a/arch/x86/um/shared/sysdep/ptrace.h
+++ b/arch/x86/um/shared/sysdep/ptrace.h
@@ -1,3 +1,6 @@
1#ifndef __SYSDEP_X86_PTRACE_H
2#define __SYSDEP_X86_PTRACE_H
3
1#ifdef __i386__ 4#ifdef __i386__
2#include "ptrace_32.h" 5#include "ptrace_32.h"
3#else 6#else
@@ -8,3 +11,5 @@ static inline long regs_return_value(struct uml_pt_regs *regs)
8{ 11{
9 return UPT_SYSCALL_RET(regs); 12 return UPT_SYSCALL_RET(regs);
10} 13}
14
15#endif /* __SYSDEP_X86_PTRACE_H */
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ecb26b4f29a0..c07f44f05f9d 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -20,11 +20,12 @@ obj-y += acpi.o \
20# All the builtin files are in the "acpi." module_param namespace. 20# All the builtin files are in the "acpi." module_param namespace.
21acpi-y += osl.o utils.o reboot.o 21acpi-y += osl.o utils.o reboot.o
22acpi-y += atomicio.o 22acpi-y += atomicio.o
23acpi-y += nvs.o
23 24
24# sleep related files 25# sleep related files
25acpi-y += wakeup.o 26acpi-y += wakeup.o
26acpi-y += sleep.o 27acpi-y += sleep.o
27acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o 28acpi-$(CONFIG_ACPI_SLEEP) += proc.o
28 29
29 30
30# 31#
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 301bd2d388ad..0ca208b6dcf0 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -8,41 +8,151 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
8# use acpi.o to put all files here into acpi.o modparam namespace 8# use acpi.o to put all files here into acpi.o modparam namespace
9obj-y += acpi.o 9obj-y += acpi.o
10 10
11acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ 11acpi-y := \
12 dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \ 12 dsargs.o \
13 dsinit.o dsargs.o dscontrol.o dswload2.o 13 dscontrol.o \
14 dsfield.o \
15 dsinit.o \
16 dsmethod.o \
17 dsmthdat.o \
18 dsobject.o \
19 dsopcode.o \
20 dsutils.o \
21 dswexec.o \
22 dswload.o \
23 dswload2.o \
24 dswscope.o \
25 dswstate.o
14 26
15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ 27acpi-y += \
16 evmisc.o evrgnini.o evxface.o evxfregn.o \ 28 evevent.o \
17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o 29 evgpe.o \
30 evgpeblk.o \
31 evgpeinit.o \
32 evgpeutil.o \
33 evglock.o \
34 evmisc.o \
35 evregion.o \
36 evrgnini.o \
37 evsci.o \
38 evxface.o \
39 evxfevnt.o \
40 evxfgpe.o \
41 evxfregn.o
18 42
19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ 43acpi-y += \
20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ 44 exconfig.o \
21 excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \ 45 exconvrt.o \
22 exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o 46 excreate.o \
47 exdebug.o \
48 exdump.o \
49 exfield.o \
50 exfldio.o \
51 exmutex.o \
52 exnames.o \
53 exoparg1.o \
54 exoparg2.o \
55 exoparg3.o \
56 exoparg6.o \
57 exprep.o \
58 exmisc.o \
59 exregion.o \
60 exresnte.o \
61 exresolv.o \
62 exresop.o \
63 exstore.o \
64 exstoren.o \
65 exstorob.o \
66 exsystem.o \
67 exutils.o
23 68
24acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o 69acpi-y += \
70 hwacpi.o \
71 hwgpe.o \
72 hwpci.o \
73 hwregs.o \
74 hwsleep.o \
75 hwvalid.o \
76 hwxface.o
25 77
26acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o 78acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
27 79
28acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \ 80acpi-y += \
29 nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \ 81 nsaccess.o \
30 nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \ 82 nsalloc.o \
31 nsparse.o nspredef.o nsrepair.o nsrepair2.o 83 nsdump.o \
84 nseval.o \
85 nsinit.o \
86 nsload.o \
87 nsnames.o \
88 nsobject.o \
89 nsparse.o \
90 nspredef.o \
91 nsrepair.o \
92 nsrepair2.o \
93 nssearch.o \
94 nsutils.o \
95 nswalk.o \
96 nsxfeval.o \
97 nsxfname.o \
98 nsxfobj.o
32 99
33acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o 100acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
34 101
35acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \ 102acpi-y += \
36 psopcode.o psscope.o psutils.o psxface.o 103 psargs.o \
104 psloop.o \
105 psopcode.o \
106 psparse.o \
107 psscope.o \
108 pstree.o \
109 psutils.o \
110 pswalk.o \
111 psxface.o
37 112
38acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \ 113acpi-y += \
39 rscalc.o rsirq.o rsmemory.o rsutils.o 114 rsaddr.o \
115 rscalc.o \
116 rscreate.o \
117 rsinfo.o \
118 rsio.o \
119 rsirq.o \
120 rslist.o \
121 rsmemory.o \
122 rsmisc.o \
123 rsserial.o \
124 rsutils.o \
125 rsxface.o
40 126
41acpi-$(ACPI_FUTURE_USAGE) += rsdump.o 127acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
42 128
43acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o 129acpi-y += \
130 tbfadt.o \
131 tbfind.o \
132 tbinstal.o \
133 tbutils.o \
134 tbxface.o \
135 tbxfroot.o
44 136
45acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \ 137acpi-y += \
46 utcopy.o utdelete.o utglobal.o utmath.o utobject.o \ 138 utaddress.o \
47 utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \ 139 utalloc.o \
48 utosi.o utxferror.o utdecode.o 140 utcopy.o \
141 utdebug.o \
142 utdecode.o \
143 utdelete.o \
144 uteval.o \
145 utglobal.o \
146 utids.o \
147 utinit.o \
148 utlock.o \
149 utmath.o \
150 utmisc.o \
151 utmutex.o \
152 utobject.o \
153 utosi.o \
154 utresrc.o \
155 utstate.o \
156 utxface.o \
157 utxferror.o \
158 utxfmutex.o
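
Note: the acpi-y lists above are only reformatted to one object file per line (sorted alphabetically); the functional changes hiding in the churn are the new rsserial.o, utaddress.o and utxfmutex.o objects, plus the removal of the duplicated utobject.o entry that was present in the old utilities line.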
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index e0ba17f0a7c8..a44bd424f9f4 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index f895a244ca7e..1f30af613e87 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -123,6 +123,10 @@
123 123
124#define ACPI_MAX_SLEEP 2000 /* Two seconds */ 124#define ACPI_MAX_SLEEP 2000 /* Two seconds */
125 125
126/* Address Range lists are per-space_id (Memory and I/O only) */
127
128#define ACPI_ADDRESS_RANGE_MAX 2
129
126/****************************************************************************** 130/******************************************************************************
127 * 131 *
128 * ACPI Specification constants (Do not change unless the specification changes) 132 * ACPI Specification constants (Do not change unless the specification changes)
@@ -202,9 +206,10 @@
202#define ACPI_RSDP_CHECKSUM_LENGTH 20 206#define ACPI_RSDP_CHECKSUM_LENGTH 20
203#define ACPI_RSDP_XCHECKSUM_LENGTH 36 207#define ACPI_RSDP_XCHECKSUM_LENGTH 36
204 208
205/* SMBus and IPMI bidirectional buffer size */ 209/* SMBus, GSBus and IPMI bidirectional buffer size */
206 210
207#define ACPI_SMBUS_BUFFER_SIZE 34 211#define ACPI_SMBUS_BUFFER_SIZE 34
212#define ACPI_GSBUS_BUFFER_SIZE 34
208#define ACPI_IPMI_BUFFER_SIZE 66 213#define ACPI_IPMI_BUFFER_SIZE 66
209 214
210/* _sx_d and _sx_w control methods */ 215/* _sx_d and _sx_w control methods */
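
Note: ACPI_ADDRESS_RANGE_MAX sizes the new per-address-space range lists (SystemMemory and SystemIO only, per the comment), and ACPI_GSBUS_BUFFER_SIZE gives the ACPI 5.0 GenericSerialBus the same 34-byte bidirectional buffer size already used for SMBus.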
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index eb0b1f8dee6d..deaa81979561 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 2d1b7ffa377a..5935ba6707e2 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index bea3b4899183..c53caa521a30 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -162,6 +162,7 @@ acpi_status acpi_ev_initialize_op_regions(void);
162 162
163acpi_status 163acpi_status
164acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, 164acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
165 union acpi_operand_object *field_obj,
165 u32 function, 166 u32 function,
166 u32 region_offset, u32 bit_width, u64 *value); 167 u32 region_offset, u32 bit_width, u64 *value);
167 168
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index e6652d716e45..2853f7673f3b 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -140,8 +140,19 @@ u32 acpi_gbl_trace_flags;
140acpi_name acpi_gbl_trace_method_name; 140acpi_name acpi_gbl_trace_method_name;
141u8 acpi_gbl_system_awake_and_running; 141u8 acpi_gbl_system_awake_and_running;
142 142
143/*
144 * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
145 * that the ACPI hardware is no longer required. A flag in the FADT indicates
146 * a reduced HW machine, and that flag is duplicated here for convenience.
147 */
148u8 acpi_gbl_reduced_hardware;
149
143#endif 150#endif
144 151
152/* Do not disassemble buffers to resource descriptors */
153
154ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
155
145/***************************************************************************** 156/*****************************************************************************
146 * 157 *
147 * Debug support 158 * Debug support
@@ -207,7 +218,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
207 218
208/***************************************************************************** 219/*****************************************************************************
209 * 220 *
210 * Mutual exlusion within ACPICA subsystem 221 * Mutual exclusion within ACPICA subsystem
211 * 222 *
212 ****************************************************************************/ 223 ****************************************************************************/
213 224
@@ -295,6 +306,8 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
295ACPI_EXTERN u8 acpi_gbl_events_initialized; 306ACPI_EXTERN u8 acpi_gbl_events_initialized;
296ACPI_EXTERN u8 acpi_gbl_osi_data; 307ACPI_EXTERN u8 acpi_gbl_osi_data;
297ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces; 308ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
309ACPI_EXTERN struct acpi_address_range
310 *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
298 311
299#ifndef DEFINE_ACPI_GLOBALS 312#ifndef DEFINE_ACPI_GLOBALS
300 313
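
The new acpi_gbl_reduced_hardware global mirrors the FADT's hardware-reduced flag. A minimal sketch of how hardware-touching code might consult it, assuming the usual ACPICA internal includes; the function and its body are hypothetical and not part of this patch:

    /* Sketch only: bail out early on ACPI 5.0 hardware-reduced platforms */
    acpi_status acpi_hw_example_enable(void)
    {
            if (acpi_gbl_reduced_hardware) {
                    return (AE_OK); /* No PM1/GPE registers to program */
            }

            /* ... legacy fixed-hardware register setup would go here ... */
            return (AE_OK);
    }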
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index e7213beaafc7..677793e938f5 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 3731e1c34b83..eb308635da72 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
468 468
469void acpi_ex_integer_to_string(char *dest, u64 value); 469void acpi_ex_integer_to_string(char *dest, u64 value);
470 470
471u8 acpi_is_valid_space_id(u8 space_id);
472
471/* 473/*
472 * exregion - default op_region handlers 474 * exregion - default op_region handlers
473 */ 475 */
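
Note: acpi_is_valid_space_id() gives callers a way to reject an unknown operation-region address space before dispatching to a handler. The expected call pattern is presumably along these lines (the exception code shown is an assumption, not taken from this patch):

    if (!acpi_is_valid_space_id(space_id)) {
            return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
    }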
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 5552125d8340..3f24068837d5 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
53 53
54/* Total number of aml opcodes defined */ 54/* Total number of aml opcodes defined */
55 55
56#define AML_NUM_OPCODES 0x7F 56#define AML_NUM_OPCODES 0x81
57 57
58/* Forward declarations */ 58/* Forward declarations */
59 59
@@ -249,12 +249,16 @@ struct acpi_create_field_info {
249 struct acpi_namespace_node *field_node; 249 struct acpi_namespace_node *field_node;
250 struct acpi_namespace_node *register_node; 250 struct acpi_namespace_node *register_node;
251 struct acpi_namespace_node *data_register_node; 251 struct acpi_namespace_node *data_register_node;
252 struct acpi_namespace_node *connection_node;
253 u8 *resource_buffer;
252 u32 bank_value; 254 u32 bank_value;
253 u32 field_bit_position; 255 u32 field_bit_position;
254 u32 field_bit_length; 256 u32 field_bit_length;
257 u16 resource_length;
255 u8 field_flags; 258 u8 field_flags;
256 u8 attribute; 259 u8 attribute;
257 u8 field_type; 260 u8 field_type;
261 u8 access_length;
258}; 262};
259 263
260typedef 264typedef
@@ -315,7 +319,8 @@ struct acpi_name_info {
315 319
316/* 320/*
317 * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2, 321 * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
318 * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT 322 * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
323 * ACPI_PTYPE2_FIX_VAR
319 */ 324 */
320struct acpi_package_info { 325struct acpi_package_info {
321 u8 type; 326 u8 type;
@@ -625,6 +630,15 @@ union acpi_generic_state {
625 630
626typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state); 631typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
627 632
633/* Address Range info block */
634
635struct acpi_address_range {
636 struct acpi_address_range *next;
637 struct acpi_namespace_node *region_node;
638 acpi_physical_address start_address;
639 acpi_physical_address end_address;
640};
641
628/***************************************************************************** 642/*****************************************************************************
629 * 643 *
630 * Parser typedefs and structs 644 * Parser typedefs and structs
@@ -951,7 +965,7 @@ struct acpi_port_info {
951#define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38 965#define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38
952#define ACPI_RESOURCE_NAME_IO 0x40 966#define ACPI_RESOURCE_NAME_IO 0x40
953#define ACPI_RESOURCE_NAME_FIXED_IO 0x48 967#define ACPI_RESOURCE_NAME_FIXED_IO 0x48
954#define ACPI_RESOURCE_NAME_RESERVED_S1 0x50 968#define ACPI_RESOURCE_NAME_FIXED_DMA 0x50
955#define ACPI_RESOURCE_NAME_RESERVED_S2 0x58 969#define ACPI_RESOURCE_NAME_RESERVED_S2 0x58
956#define ACPI_RESOURCE_NAME_RESERVED_S3 0x60 970#define ACPI_RESOURCE_NAME_RESERVED_S3 0x60
957#define ACPI_RESOURCE_NAME_RESERVED_S4 0x68 971#define ACPI_RESOURCE_NAME_RESERVED_S4 0x68
@@ -973,7 +987,9 @@ struct acpi_port_info {
973#define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89 987#define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89
974#define ACPI_RESOURCE_NAME_ADDRESS64 0x8A 988#define ACPI_RESOURCE_NAME_ADDRESS64 0x8A
975#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B 989#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B
976#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8B 990#define ACPI_RESOURCE_NAME_GPIO 0x8C
991#define ACPI_RESOURCE_NAME_SERIAL_BUS 0x8E
992#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8E
977 993
978/***************************************************************************** 994/*****************************************************************************
979 * 995 *
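
Note: the aclocal.h changes are the glue for several ACPI 5.0 features seen elsewhere in this diff. AML_NUM_OPCODES grows from 0x7F to 0x81 to cover the two new internal opcodes (AML_INT_CONNECTION_OP and AML_INT_EXTACCESSFIELD_OP in amlcode.h); acpi_create_field_info now carries the Connection() node, resource buffer/length and access length for serial fields; struct acpi_address_range is the list node behind acpi_gbl_address_range_list; and the resource-name table assigns FixedDMA the previously reserved small tag 0x50 while extending the large-descriptor range to cover GPIO (0x8C) and SerialBus (0x8E).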
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index b7491ee1fba6..ef338a96f5b2 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 79a598c67fe3..2c9e0f049523 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1055769f2f01..c065078ca83b 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
254 u32 base_byte_offset; /* Byte offset within containing object */\ 254 u32 base_byte_offset; /* Byte offset within containing object */\
255 u32 value; /* Value to store into the Bank or Index register */\ 255 u32 value; /* Value to store into the Bank or Index register */\
256 u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\ 256 u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
257 u8 access_length; /* For serial regions/fields */
257 258
258 259
259struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ 260struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -261,7 +262,9 @@ struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and
261}; 262};
262 263
263struct acpi_object_region_field { 264struct acpi_object_region_field {
264 ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */ 265 ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
266 union acpi_operand_object *region_obj; /* Containing op_region object */
267 u8 *resource_buffer; /* resource_template for serial regions/fields */
265}; 268};
266 269
267struct acpi_object_bank_field { 270struct acpi_object_bank_field {
@@ -358,6 +361,7 @@ typedef enum {
358 */ 361 */
359struct acpi_object_extra { 362struct acpi_object_extra {
360 ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ 363 ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
364 struct acpi_namespace_node *scope_node;
361 void *region_context; /* Region-specific data */ 365 void *region_context; /* Region-specific data */
362 u8 *aml_start; 366 u8 *aml_start;
363 u32 aml_length; 367 u32 aml_length;
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index bb2ccfad7376..9440d053fbb3 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,7 @@
93#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) 93#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
94#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) 94#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
95#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME) 95#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
96#define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
96#define ARGP_CONTINUE_OP ARG_NONE 97#define ARGP_CONTINUE_OP ARG_NONE
97#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME) 98#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
98#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME) 99#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
@@ -164,6 +165,7 @@
164#define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG) 165#define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG)
165#define ARGP_REVISION_OP ARG_NONE 166#define ARGP_REVISION_OP ARG_NONE
166#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST) 167#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
168#define ARGP_SERIALFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
167#define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) 169#define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
168#define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) 170#define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
169#define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME) 171#define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME)
@@ -223,6 +225,7 @@
223#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF) 225#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
224#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF) 226#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
225#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF) 227#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
228#define ARGI_CONNECTFIELD_OP ARGI_INVALID_OPCODE
226#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE 229#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE
227#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET) 230#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
228#define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE) 231#define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
@@ -294,6 +297,7 @@
294#define ARGI_RETURN_OP ARGI_INVALID_OPCODE 297#define ARGI_RETURN_OP ARGI_INVALID_OPCODE
295#define ARGI_REVISION_OP ARG_NONE 298#define ARGI_REVISION_OP ARG_NONE
296#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE 299#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE
300#define ARGI_SERIALFIELD_OP ARGI_INVALID_OPCODE
297#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) 301#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
298#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) 302#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
299#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT) 303#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT)
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 5ea1e06afa20..b725d780d34d 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index c445cca490ea..bbb34c9be4e8 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -94,6 +94,14 @@
94 * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length 94 * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
95 * (Used for _ART, _FPS) 95 * (Used for _ART, _FPS)
96 * 96 *
97 * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
98 * followed by an optional element
99 * object type
100 * count
101 * object type
102 * count = 0 (optional)
103 * (Used for _DLM)
104 *
97 *****************************************************************************/ 105 *****************************************************************************/
98 106
99enum acpi_return_package_types { 107enum acpi_return_package_types {
@@ -105,7 +113,8 @@ enum acpi_return_package_types {
105 ACPI_PTYPE2_PKG_COUNT = 6, 113 ACPI_PTYPE2_PKG_COUNT = 6,
106 ACPI_PTYPE2_FIXED = 7, 114 ACPI_PTYPE2_FIXED = 7,
107 ACPI_PTYPE2_MIN = 8, 115 ACPI_PTYPE2_MIN = 8,
108 ACPI_PTYPE2_REV_FIXED = 9 116 ACPI_PTYPE2_REV_FIXED = 9,
117 ACPI_PTYPE2_FIX_VAR = 10
109}; 118};
110 119
111#ifdef ACPI_CREATE_PREDEFINED_TABLE 120#ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -154,6 +163,7 @@ static const union acpi_predefined_info predefined_names[] =
154 {{"_AC8", 0, ACPI_RTYPE_INTEGER}}, 163 {{"_AC8", 0, ACPI_RTYPE_INTEGER}},
155 {{"_AC9", 0, ACPI_RTYPE_INTEGER}}, 164 {{"_AC9", 0, ACPI_RTYPE_INTEGER}},
156 {{"_ADR", 0, ACPI_RTYPE_INTEGER}}, 165 {{"_ADR", 0, ACPI_RTYPE_INTEGER}},
166 {{"_AEI", 0, ACPI_RTYPE_BUFFER}},
157 {{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 167 {{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
158 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 168 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
159 169
@@ -229,6 +239,13 @@ static const union acpi_predefined_info predefined_names[] =
229 {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */ 239 {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
230 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}}, 240 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
231 241
242 {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
243 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
244
245 {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */
246 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
247 0}},
248
232 {{"_CRS", 0, ACPI_RTYPE_BUFFER}}, 249 {{"_CRS", 0, ACPI_RTYPE_BUFFER}},
233 {{"_CRT", 0, ACPI_RTYPE_INTEGER}}, 250 {{"_CRT", 0, ACPI_RTYPE_INTEGER}},
234 {{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */ 251 {{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
@@ -237,12 +254,21 @@ static const union acpi_predefined_info predefined_names[] =
237 {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */ 254 {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
238 {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}}, 255 {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
239 256
257 {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
240 {{"_DCK", 1, ACPI_RTYPE_INTEGER}}, 258 {{"_DCK", 1, ACPI_RTYPE_INTEGER}},
241 {{"_DCS", 0, ACPI_RTYPE_INTEGER}}, 259 {{"_DCS", 0, ACPI_RTYPE_INTEGER}},
242 {{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}}, 260 {{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
243 {{"_DDN", 0, ACPI_RTYPE_STRING}}, 261 {{"_DDN", 0, ACPI_RTYPE_STRING}},
262 {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
263 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
264
244 {{"_DGS", 0, ACPI_RTYPE_INTEGER}}, 265 {{"_DGS", 0, ACPI_RTYPE_INTEGER}},
245 {{"_DIS", 0, 0}}, 266 {{"_DIS", 0, 0}},
267
268 {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
269 {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
270 ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
271
246 {{"_DMA", 0, ACPI_RTYPE_BUFFER}}, 272 {{"_DMA", 0, ACPI_RTYPE_BUFFER}},
247 {{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */ 273 {{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
248 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}}, 274 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -262,6 +288,7 @@ static const union acpi_predefined_info predefined_names[] =
262 {{"_EJ3", 1, 0}}, 288 {{"_EJ3", 1, 0}},
263 {{"_EJ4", 1, 0}}, 289 {{"_EJ4", 1, 0}},
264 {{"_EJD", 0, ACPI_RTYPE_STRING}}, 290 {{"_EJD", 0, ACPI_RTYPE_STRING}},
291 {{"_EVT", 1, 0}},
265 {{"_FDE", 0, ACPI_RTYPE_BUFFER}}, 292 {{"_FDE", 0, ACPI_RTYPE_BUFFER}},
266 {{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */ 293 {{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
267 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}}, 294 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -281,14 +308,17 @@ static const union acpi_predefined_info predefined_names[] =
281 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}}, 308 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
282 309
283 {{"_GAI", 0, ACPI_RTYPE_INTEGER}}, 310 {{"_GAI", 0, ACPI_RTYPE_INTEGER}},
311 {{"_GCP", 0, ACPI_RTYPE_INTEGER}},
284 {{"_GHL", 0, ACPI_RTYPE_INTEGER}}, 312 {{"_GHL", 0, ACPI_RTYPE_INTEGER}},
285 {{"_GLK", 0, ACPI_RTYPE_INTEGER}}, 313 {{"_GLK", 0, ACPI_RTYPE_INTEGER}},
286 {{"_GPD", 0, ACPI_RTYPE_INTEGER}}, 314 {{"_GPD", 0, ACPI_RTYPE_INTEGER}},
287 {{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */ 315 {{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
316 {{"_GRT", 0, ACPI_RTYPE_BUFFER}},
288 {{"_GSB", 0, ACPI_RTYPE_INTEGER}}, 317 {{"_GSB", 0, ACPI_RTYPE_INTEGER}},
289 {{"_GTF", 0, ACPI_RTYPE_BUFFER}}, 318 {{"_GTF", 0, ACPI_RTYPE_BUFFER}},
290 {{"_GTM", 0, ACPI_RTYPE_BUFFER}}, 319 {{"_GTM", 0, ACPI_RTYPE_BUFFER}},
291 {{"_GTS", 1, 0}}, 320 {{"_GTS", 1, 0}},
321 {{"_GWS", 1, ACPI_RTYPE_INTEGER}},
292 {{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}}, 322 {{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
293 {{"_HOT", 0, ACPI_RTYPE_INTEGER}}, 323 {{"_HOT", 0, ACPI_RTYPE_INTEGER}},
294 {{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */ 324 {{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
@@ -303,6 +333,7 @@ static const union acpi_predefined_info predefined_names[] =
303 {{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */ 333 {{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
304 {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}}, 334 {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
305 335
336 {{"_HRV", 0, ACPI_RTYPE_INTEGER}},
306 {{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */ 337 {{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
307 {{"_INI", 0, 0}}, 338 {{"_INI", 0, 0}},
308 {{"_IRC", 0, 0}}, 339 {{"_IRC", 0, 0}},
@@ -361,6 +392,9 @@ static const union acpi_predefined_info predefined_names[] =
361 {{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 392 {{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
362 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, 393 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
363 394
395 {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
396 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
397
364 {{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 398 {{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
365 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, 399 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
366 400
@@ -391,6 +425,7 @@ static const union acpi_predefined_info predefined_names[] =
391 {{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */ 425 {{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
392 {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}}, 426 {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
393 427
428 {{"_PSE", 1, 0}},
394 {{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ 429 {{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
395 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, 430 {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
396 431
@@ -457,6 +492,7 @@ static const union acpi_predefined_info predefined_names[] =
457 {{"_SLI", 0, ACPI_RTYPE_BUFFER}}, 492 {{"_SLI", 0, ACPI_RTYPE_BUFFER}},
458 {{"_SPD", 1, ACPI_RTYPE_INTEGER}}, 493 {{"_SPD", 1, ACPI_RTYPE_INTEGER}},
459 {{"_SRS", 1, 0}}, 494 {{"_SRS", 1, 0}},
495 {{"_SRT", 1, ACPI_RTYPE_INTEGER}},
460 {{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */ 496 {{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
461 {{"_SST", 1, 0}}, 497 {{"_SST", 1, 0}},
462 {{"_STA", 0, ACPI_RTYPE_INTEGER}}, 498 {{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -464,6 +500,7 @@ static const union acpi_predefined_info predefined_names[] =
464 {{"_STP", 2, ACPI_RTYPE_INTEGER}}, 500 {{"_STP", 2, ACPI_RTYPE_INTEGER}},
465 {{"_STR", 0, ACPI_RTYPE_BUFFER}}, 501 {{"_STR", 0, ACPI_RTYPE_BUFFER}},
466 {{"_STV", 2, ACPI_RTYPE_INTEGER}}, 502 {{"_STV", 2, ACPI_RTYPE_INTEGER}},
503 {{"_SUB", 0, ACPI_RTYPE_STRING}},
467 {{"_SUN", 0, ACPI_RTYPE_INTEGER}}, 504 {{"_SUN", 0, ACPI_RTYPE_INTEGER}},
468 {{"_SWS", 0, ACPI_RTYPE_INTEGER}}, 505 {{"_SWS", 0, ACPI_RTYPE_INTEGER}},
469 {{"_TC1", 0, ACPI_RTYPE_INTEGER}}, 506 {{"_TC1", 0, ACPI_RTYPE_INTEGER}},
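
Note: the predefined-name table gains the new ACPI 5.0 objects visible above (_AEI, _CLS, _CPC, _CWS, _DEP, _DLM, _EVT, _GCP, _GRT, _GWS, _HRV, _PRE, _PSE, _SRT and _SUB). _DLM needed the new ACPI_PTYPE2_FIX_VAR package type because each of its subpackages is one reference followed by an optional buffer or reference.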
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f08b55b7f3a0..0347d0993497 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -73,28 +73,40 @@ typedef const struct acpi_rsconvert_info {
73 73
74/* Resource conversion opcodes */ 74/* Resource conversion opcodes */
75 75
76#define ACPI_RSC_INITGET 0 76typedef enum {
77#define ACPI_RSC_INITSET 1 77 ACPI_RSC_INITGET = 0,
78#define ACPI_RSC_FLAGINIT 2 78 ACPI_RSC_INITSET,
79#define ACPI_RSC_1BITFLAG 3 79 ACPI_RSC_FLAGINIT,
80#define ACPI_RSC_2BITFLAG 4 80 ACPI_RSC_1BITFLAG,
81#define ACPI_RSC_COUNT 5 81 ACPI_RSC_2BITFLAG,
82#define ACPI_RSC_COUNT16 6 82 ACPI_RSC_3BITFLAG,
83#define ACPI_RSC_LENGTH 7 83 ACPI_RSC_ADDRESS,
84#define ACPI_RSC_MOVE8 8 84 ACPI_RSC_BITMASK,
85#define ACPI_RSC_MOVE16 9 85 ACPI_RSC_BITMASK16,
86#define ACPI_RSC_MOVE32 10 86 ACPI_RSC_COUNT,
87#define ACPI_RSC_MOVE64 11 87 ACPI_RSC_COUNT16,
88#define ACPI_RSC_SET8 12 88 ACPI_RSC_COUNT_GPIO_PIN,
89#define ACPI_RSC_DATA8 13 89 ACPI_RSC_COUNT_GPIO_RES,
90#define ACPI_RSC_ADDRESS 14 90 ACPI_RSC_COUNT_GPIO_VEN,
91#define ACPI_RSC_SOURCE 15 91 ACPI_RSC_COUNT_SERIAL_RES,
92#define ACPI_RSC_SOURCEX 16 92 ACPI_RSC_COUNT_SERIAL_VEN,
93#define ACPI_RSC_BITMASK 17 93 ACPI_RSC_DATA8,
94#define ACPI_RSC_BITMASK16 18 94 ACPI_RSC_EXIT_EQ,
95#define ACPI_RSC_EXIT_NE 19 95 ACPI_RSC_EXIT_LE,
96#define ACPI_RSC_EXIT_LE 20 96 ACPI_RSC_EXIT_NE,
97#define ACPI_RSC_EXIT_EQ 21 97 ACPI_RSC_LENGTH,
98 ACPI_RSC_MOVE_GPIO_PIN,
99 ACPI_RSC_MOVE_GPIO_RES,
100 ACPI_RSC_MOVE_SERIAL_RES,
101 ACPI_RSC_MOVE_SERIAL_VEN,
102 ACPI_RSC_MOVE8,
103 ACPI_RSC_MOVE16,
104 ACPI_RSC_MOVE32,
105 ACPI_RSC_MOVE64,
106 ACPI_RSC_SET8,
107 ACPI_RSC_SOURCE,
108 ACPI_RSC_SOURCEX
109} ACPI_RSCONVERT_OPCODES;
98 110
99/* Resource Conversion sub-opcodes */ 111/* Resource Conversion sub-opcodes */
100 112
@@ -106,6 +118,9 @@ typedef const struct acpi_rsconvert_info {
106#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f) 118#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f)
107#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f) 119#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f)
108 120
121/*
122 * Individual entry for the resource dump tables
123 */
109typedef const struct acpi_rsdump_info { 124typedef const struct acpi_rsdump_info {
110 u8 opcode; 125 u8 opcode;
111 u8 offset; 126 u8 offset;
@@ -116,20 +131,25 @@ typedef const struct acpi_rsdump_info {
116 131
117/* Values for the Opcode field above */ 132/* Values for the Opcode field above */
118 133
119#define ACPI_RSD_TITLE 0 134typedef enum {
120#define ACPI_RSD_LITERAL 1 135 ACPI_RSD_TITLE = 0,
121#define ACPI_RSD_STRING 2 136 ACPI_RSD_1BITFLAG,
122#define ACPI_RSD_UINT8 3 137 ACPI_RSD_2BITFLAG,
123#define ACPI_RSD_UINT16 4 138 ACPI_RSD_3BITFLAG,
124#define ACPI_RSD_UINT32 5 139 ACPI_RSD_ADDRESS,
125#define ACPI_RSD_UINT64 6 140 ACPI_RSD_DWORDLIST,
126#define ACPI_RSD_1BITFLAG 7 141 ACPI_RSD_LITERAL,
127#define ACPI_RSD_2BITFLAG 8 142 ACPI_RSD_LONGLIST,
128#define ACPI_RSD_SHORTLIST 9 143 ACPI_RSD_SHORTLIST,
129#define ACPI_RSD_LONGLIST 10 144 ACPI_RSD_SHORTLISTX,
130#define ACPI_RSD_DWORDLIST 11 145 ACPI_RSD_SOURCE,
131#define ACPI_RSD_ADDRESS 12 146 ACPI_RSD_STRING,
132#define ACPI_RSD_SOURCE 13 147 ACPI_RSD_UINT8,
148 ACPI_RSD_UINT16,
149 ACPI_RSD_UINT32,
150 ACPI_RSD_UINT64,
151 ACPI_RSD_WORDLIST
152} ACPI_RSDUMP_OPCODES;
133 153
134/* restore default alignment */ 154/* restore default alignment */
135 155
@@ -138,13 +158,18 @@ typedef const struct acpi_rsdump_info {
138/* Resource tables indexed by internal resource type */ 158/* Resource tables indexed by internal resource type */
139 159
140extern const u8 acpi_gbl_aml_resource_sizes[]; 160extern const u8 acpi_gbl_aml_resource_sizes[];
161extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
141extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[]; 162extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
142 163
143/* Resource tables indexed by raw AML resource descriptor type */ 164/* Resource tables indexed by raw AML resource descriptor type */
144 165
145extern const u8 acpi_gbl_resource_struct_sizes[]; 166extern const u8 acpi_gbl_resource_struct_sizes[];
167extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
146extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[]; 168extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
147 169
170extern struct acpi_rsconvert_info
171 *acpi_gbl_convert_resource_serial_bus_dispatch[];
172
148struct acpi_vendor_walk_info { 173struct acpi_vendor_walk_info {
149 struct acpi_vendor_uuid *uuid; 174 struct acpi_vendor_uuid *uuid;
150 struct acpi_buffer *buffer; 175 struct acpi_buffer *buffer;
@@ -190,6 +215,10 @@ acpi_status
190acpi_rs_set_srs_method_data(struct acpi_namespace_node *node, 215acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
191 struct acpi_buffer *ret_buffer); 216 struct acpi_buffer *ret_buffer);
192 217
218acpi_status
219acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
220 struct acpi_buffer *ret_buffer);
221
193/* 222/*
194 * rscalc 223 * rscalc
195 */ 224 */
@@ -293,6 +322,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
293extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[]; 322extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
294extern struct acpi_rsconvert_info acpi_rs_convert_address64[]; 323extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
295extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[]; 324extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
325extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
326extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
327extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
328extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
329extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
296 330
297/* These resources require separate get/set tables */ 331/* These resources require separate get/set tables */
298 332
@@ -310,6 +344,7 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
310 * rsinfo 344 * rsinfo
311 */ 345 */
312extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[]; 346extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
347extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
313 348
314/* 349/*
315 * rsdump 350 * rsdump
@@ -331,6 +366,12 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[];
331extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[]; 366extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
332extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[]; 367extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
333extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[]; 368extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
369extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
370extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
371extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
372extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
373extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
374extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
334#endif 375#endif
335 376
336#endif /* __ACRESRC_H__ */ 377#endif /* __ACRESRC_H__ */
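
Note: the resource-conversion (ACPI_RSC_*) and resource-dump (ACPI_RSD_*) opcode lists are converted from hand-numbered #defines to sorted enums, picking up GPIO- and serial-bus-specific opcodes along the way, and new conversion/dump tables are declared for the ACPI 5.0 descriptors (gpio, fixed_dma, i2c/spi/uart serial bus) together with an _AEI accessor, acpi_rs_get_aei_method_data().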
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 1623b245dde2..0404df605bc1 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 967f08124eba..d5bec304c823 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 99c140d8e348..925ccf22101b 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
45#define _ACUTILS_H 45#define _ACUTILS_H
46 46
47extern const u8 acpi_gbl_resource_aml_sizes[]; 47extern const u8 acpi_gbl_resource_aml_sizes[];
48extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
48 49
49/* Strings used by the disassembler and debugger resource dump routines */ 50/* Strings used by the disassembler and debugger resource dump routines */
50 51
@@ -579,6 +580,24 @@ acpi_ut_create_list(char *list_name,
579#endif /* ACPI_DBG_TRACK_ALLOCATIONS */ 580#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
580 581
581/* 582/*
583 * utaddress - address range check
584 */
585acpi_status
586acpi_ut_add_address_range(acpi_adr_space_type space_id,
587 acpi_physical_address address,
588 u32 length, struct acpi_namespace_node *region_node);
589
590void
591acpi_ut_remove_address_range(acpi_adr_space_type space_id,
592 struct acpi_namespace_node *region_node);
593
594u32
595acpi_ut_check_address_range(acpi_adr_space_type space_id,
596 acpi_physical_address address, u32 length, u8 warn);
597
598void acpi_ut_delete_address_lists(void);
599
600/*
582 * utxferror - various error/warning output functions 601 * utxferror - various error/warning output functions
583 */ 602 */
584void ACPI_INTERNAL_VAR_XFACE 603void ACPI_INTERNAL_VAR_XFACE
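
The utaddress prototypes are the internal API behind the op-region address tracking wired up elsewhere in this diff (dsargs.c below registers ranges after evaluating region arguments). A rough usage sketch under these signatures; the locals obj_desc, region_node, physical_address and map_length are hypothetical:

    acpi_status status;
    u32 overlap_count;

    /* Record the op-region's physical range (Memory and I/O spaces only) */
    status = acpi_ut_add_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
                                       obj_desc->region.address,
                                       obj_desc->region.length, region_node);
    if (ACPI_FAILURE(status)) {
            return_ACPI_STATUS(status);
    }

    /* Later: warn if a driver mapping would overlap a tracked op-region */
    overlap_count = acpi_ut_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
                                                physical_address, map_length, TRUE);

    /* On region deletion, drop the entry again */
    acpi_ut_remove_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY, region_node);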
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1077f17859ed..905280fec0fa 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2011, Intel Corp. 10 * Copyright (C) 2000 - 2012, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -189,6 +189,14 @@
189#define AML_LNOTEQUAL_OP (u16) 0x9293 189#define AML_LNOTEQUAL_OP (u16) 0x9293
190 190
191/* 191/*
192 * Opcodes for "Field" operators
193 */
194#define AML_FIELD_OFFSET_OP (u8) 0x00
195#define AML_FIELD_ACCESS_OP (u8) 0x01
196#define AML_FIELD_CONNECTION_OP (u8) 0x02 /* ACPI 5.0 */
197#define AML_FIELD_EXT_ACCESS_OP (u8) 0x03 /* ACPI 5.0 */
198
199/*
192 * Internal opcodes 200 * Internal opcodes
193 * Use only "Unknown" AML opcodes, don't attempt to use 201 * Use only "Unknown" AML opcodes, don't attempt to use
194 * any valid ACPI ASCII values (A-Z, 0-9, '-') 202 * any valid ACPI ASCII values (A-Z, 0-9, '-')
@@ -202,6 +210,8 @@
202#define AML_INT_METHODCALL_OP (u16) 0x0035 210#define AML_INT_METHODCALL_OP (u16) 0x0035
203#define AML_INT_RETURN_VALUE_OP (u16) 0x0036 211#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
204#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037 212#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
213#define AML_INT_CONNECTION_OP (u16) 0x0038
214#define AML_INT_EXTACCESSFIELD_OP (u16) 0x0039
205 215
206#define ARG_NONE 0x0 216#define ARG_NONE 0x0
207 217
@@ -456,13 +466,16 @@ typedef enum {
456 * access_as keyword 466 * access_as keyword
457 */ 467 */
458typedef enum { 468typedef enum {
459 AML_FIELD_ATTRIB_SMB_QUICK = 0x02, 469 AML_FIELD_ATTRIB_QUICK = 0x02,
460 AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04, 470 AML_FIELD_ATTRIB_SEND_RCV = 0x04,
461 AML_FIELD_ATTRIB_SMB_BYTE = 0x06, 471 AML_FIELD_ATTRIB_BYTE = 0x06,
462 AML_FIELD_ATTRIB_SMB_WORD = 0x08, 472 AML_FIELD_ATTRIB_WORD = 0x08,
463 AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A, 473 AML_FIELD_ATTRIB_BLOCK = 0x0A,
464 AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C, 474 AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
465 AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D 475 AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
476 AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
477 AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
478 AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
466} AML_ACCESS_ATTRIBUTE; 479} AML_ACCESS_ATTRIBUTE;
467 480
468/* Bit fields in the AML method_flags byte */ 481/* Bit fields in the AML method_flags byte */
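
Note: amlcode.h adds the two ACPI 5.0 field-list opcodes (Connection and extended AccessAs) plus matching internal parser opcodes, and the AML_FIELD_ATTRIB_* access attributes drop their SMB_ prefix and gain MULTIBYTE/RAW_BYTES/RAW_PROCESS values, presumably because the same AccessAs attributes now apply to GenericSerialBus targets (I2C/SPI/UART) and not only to SMBus.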
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 59122cde247c..7b2128f274e7 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -58,29 +58,48 @@
58#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT" 58#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
59#define ACPI_RESTAG_BASEADDRESS "_BAS" 59#define ACPI_RESTAG_BASEADDRESS "_BAS"
60#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */ 60#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
61#define ACPI_RESTAG_DEBOUNCETIME "_DBT"
61#define ACPI_RESTAG_DECODE "_DEC" 62#define ACPI_RESTAG_DECODE "_DEC"
63#define ACPI_RESTAG_DEVICEPOLARITY "_DPL"
62#define ACPI_RESTAG_DMA "_DMA" 64#define ACPI_RESTAG_DMA "_DMA"
63#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */ 65#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
66#define ACPI_RESTAG_DRIVESTRENGTH "_DRS"
67#define ACPI_RESTAG_ENDIANNESS "_END"
68#define ACPI_RESTAG_FLOWCONTROL "_FLC"
64#define ACPI_RESTAG_GRANULARITY "_GRA" 69#define ACPI_RESTAG_GRANULARITY "_GRA"
65#define ACPI_RESTAG_INTERRUPT "_INT" 70#define ACPI_RESTAG_INTERRUPT "_INT"
66#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */ 71#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
67#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */ 72#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
68#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */ 73#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
74#define ACPI_RESTAG_IORESTRICTION "_IOR"
69#define ACPI_RESTAG_LENGTH "_LEN" 75#define ACPI_RESTAG_LENGTH "_LEN"
76#define ACPI_RESTAG_LINE "_LIN"
70#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */ 77#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
71#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */ 78#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
72#define ACPI_RESTAG_MAXADDR "_MAX" 79#define ACPI_RESTAG_MAXADDR "_MAX"
73#define ACPI_RESTAG_MINADDR "_MIN" 80#define ACPI_RESTAG_MINADDR "_MIN"
74#define ACPI_RESTAG_MAXTYPE "_MAF" 81#define ACPI_RESTAG_MAXTYPE "_MAF"
75#define ACPI_RESTAG_MINTYPE "_MIF" 82#define ACPI_RESTAG_MINTYPE "_MIF"
83#define ACPI_RESTAG_MODE "_MOD"
84#define ACPI_RESTAG_PARITY "_PAR"
85#define ACPI_RESTAG_PHASE "_PHA"
86#define ACPI_RESTAG_PIN "_PIN"
87#define ACPI_RESTAG_PINCONFIG "_PPI"
88#define ACPI_RESTAG_POLARITY "_POL"
76#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO" 89#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
77#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW" 90#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
78#define ACPI_RESTAG_RANGETYPE "_RNG" 91#define ACPI_RESTAG_RANGETYPE "_RNG"
79#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */ 92#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
93#define ACPI_RESTAG_LENGTH_RX "_RXL"
94#define ACPI_RESTAG_LENGTH_TX "_TXL"
95#define ACPI_RESTAG_SLAVEMODE "_SLV"
96#define ACPI_RESTAG_SPEED "_SPE"
97#define ACPI_RESTAG_STOPBITS "_STB"
80#define ACPI_RESTAG_TRANSLATION "_TRA" 98#define ACPI_RESTAG_TRANSLATION "_TRA"
81#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */ 99#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
82#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */ 100#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
83#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */ 101#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
102#define ACPI_RESTAG_VENDORDATA "_VEN"
84 103
85/* Default sizes for "small" resource descriptors */ 104/* Default sizes for "small" resource descriptors */
86 105
@@ -90,6 +109,7 @@
90#define ASL_RDESC_END_DEPEND_SIZE 0x00 109#define ASL_RDESC_END_DEPEND_SIZE 0x00
91#define ASL_RDESC_IO_SIZE 0x07 110#define ASL_RDESC_IO_SIZE 0x07
92#define ASL_RDESC_FIXED_IO_SIZE 0x03 111#define ASL_RDESC_FIXED_IO_SIZE 0x03
112#define ASL_RDESC_FIXED_DMA_SIZE 0x05
93#define ASL_RDESC_END_TAG_SIZE 0x01 113#define ASL_RDESC_END_TAG_SIZE 0x01
94 114
95struct asl_resource_node { 115struct asl_resource_node {
@@ -164,6 +184,12 @@ struct aml_resource_end_tag {
164 AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum; 184 AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
165}; 185};
166 186
187struct aml_resource_fixed_dma {
188 AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
189 u16 channels;
190 u8 width;
191};
192
167/* 193/*
168 * LARGE descriptors 194 * LARGE descriptors
169 */ 195 */
@@ -263,6 +289,110 @@ struct aml_resource_generic_register {
263 u64 address; 289 u64 address;
264}; 290};
265 291
292/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
293
294struct aml_resource_gpio {
295 AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
296 u8 connection_type;
297 u16 flags;
298 u16 int_flags;
299 u8 pin_config;
300 u16 drive_strength;
301 u16 debounce_timeout;
302 u16 pin_table_offset;
303 u8 res_source_index;
304 u16 res_source_offset;
305 u16 vendor_offset;
306 u16 vendor_length;
307 /*
308 * Optional fields follow immediately:
309 * 1) PIN list (Words)
310 * 2) Resource Source String
311 * 3) Vendor Data bytes
312 */
313};
314
315#define AML_RESOURCE_GPIO_REVISION 1 /* ACPI 5.0 */
316
317/* Values for connection_type above */
318
319#define AML_RESOURCE_GPIO_TYPE_INT 0
320#define AML_RESOURCE_GPIO_TYPE_IO 1
321#define AML_RESOURCE_MAX_GPIOTYPE 1
322
323/* Common preamble for all serial descriptors (ACPI 5.0) */
324
325#define AML_RESOURCE_SERIAL_COMMON \
326 u8 revision_id; \
327 u8 res_source_index; \
328 u8 type; \
329 u8 flags; \
330 u16 type_specific_flags; \
331 u8 type_revision_id; \
332 u16 type_data_length; \
333
334/* Values for the type field above */
335
336#define AML_RESOURCE_I2C_SERIALBUSTYPE 1
337#define AML_RESOURCE_SPI_SERIALBUSTYPE 2
338#define AML_RESOURCE_UART_SERIALBUSTYPE 3
339#define AML_RESOURCE_MAX_SERIALBUSTYPE 3
340#define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
341
342struct aml_resource_common_serialbus {
343AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
344
345struct aml_resource_i2c_serialbus {
346 AML_RESOURCE_LARGE_HEADER_COMMON
347 AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
348 u16 slave_address;
349 /*
350 * Optional fields follow immediately:
351 * 1) Vendor Data bytes
352 * 2) Resource Source String
353 */
354};
355
356#define AML_RESOURCE_I2C_REVISION 1 /* ACPI 5.0 */
357#define AML_RESOURCE_I2C_TYPE_REVISION 1 /* ACPI 5.0 */
358#define AML_RESOURCE_I2C_MIN_DATA_LEN 6
359
360struct aml_resource_spi_serialbus {
361 AML_RESOURCE_LARGE_HEADER_COMMON
362 AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
363 u8 data_bit_length;
364 u8 clock_phase;
365 u8 clock_polarity;
366 u16 device_selection;
367 /*
368 * Optional fields follow immediately:
369 * 1) Vendor Data bytes
370 * 2) Resource Source String
371 */
372};
373
374#define AML_RESOURCE_SPI_REVISION 1 /* ACPI 5.0 */
375#define AML_RESOURCE_SPI_TYPE_REVISION 1 /* ACPI 5.0 */
376#define AML_RESOURCE_SPI_MIN_DATA_LEN 9
377
378struct aml_resource_uart_serialbus {
379 AML_RESOURCE_LARGE_HEADER_COMMON
380 AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
381 u16 rx_fifo_size;
382 u16 tx_fifo_size;
383 u8 parity;
384 u8 lines_enabled;
385 /*
386 * Optional fields follow immediately:
387 * 1) Vendor Data bytes
388 * 2) Resource Source String
389 */
390};
391
392#define AML_RESOURCE_UART_REVISION 1 /* ACPI 5.0 */
393#define AML_RESOURCE_UART_TYPE_REVISION 1 /* ACPI 5.0 */
394#define AML_RESOURCE_UART_MIN_DATA_LEN 10
395
266/* restore default alignment */ 396/* restore default alignment */
267 397
268#pragma pack() 398#pragma pack()
@@ -284,6 +414,7 @@ union aml_resource {
284 struct aml_resource_end_dependent end_dpf; 414 struct aml_resource_end_dependent end_dpf;
285 struct aml_resource_io io; 415 struct aml_resource_io io;
286 struct aml_resource_fixed_io fixed_io; 416 struct aml_resource_fixed_io fixed_io;
417 struct aml_resource_fixed_dma fixed_dma;
287 struct aml_resource_vendor_small vendor_small; 418 struct aml_resource_vendor_small vendor_small;
288 struct aml_resource_end_tag end_tag; 419 struct aml_resource_end_tag end_tag;
289 420
@@ -299,6 +430,11 @@ union aml_resource {
299 struct aml_resource_address64 address64; 430 struct aml_resource_address64 address64;
300 struct aml_resource_extended_address64 ext_address64; 431 struct aml_resource_extended_address64 ext_address64;
301 struct aml_resource_extended_irq extended_irq; 432 struct aml_resource_extended_irq extended_irq;
433 struct aml_resource_gpio gpio;
434 struct aml_resource_i2c_serialbus i2c_serial_bus;
435 struct aml_resource_spi_serialbus spi_serial_bus;
436 struct aml_resource_uart_serialbus uart_serial_bus;
437 struct aml_resource_common_serialbus common_serial_bus;
302 438
303 /* Utility overlays */ 439 /* Utility overlays */
304 440
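
amlresrc.h adds raw AML layouts for the ACPI 5.0 GPIO, FixedDMA and serial-bus descriptors. A minimal sketch of peeking at an I2cSerialBus descriptor through the new union members; aml_buffer is a hypothetical pointer to the raw descriptor:

    union aml_resource *aml = ACPI_CAST_PTR(union aml_resource, aml_buffer);

    if (aml->common_serial_bus.type == AML_RESOURCE_I2C_SERIALBUSTYPE) {
            u32 speed = aml->i2c_serial_bus.connection_speed;
            u16 address = aml->i2c_serial_bus.slave_address;

            /* Vendor data and the Resource Source string follow the fixed fields */
    }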
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 8c7b99728aa2..80eb1900297f 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -250,6 +250,13 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
250 status = acpi_ds_execute_arguments(node, node->parent, 250 status = acpi_ds_execute_arguments(node, node->parent,
251 extra_desc->extra.aml_length, 251 extra_desc->extra.aml_length,
252 extra_desc->extra.aml_start); 252 extra_desc->extra.aml_start);
253 if (ACPI_FAILURE(status)) {
254 return_ACPI_STATUS(status);
255 }
256
257 status = acpi_ut_add_address_range(obj_desc->region.space_id,
258 obj_desc->region.address,
259 obj_desc->region.length, node);
253 return_ACPI_STATUS(status); 260 return_ACPI_STATUS(status);
254} 261}
255 262
@@ -384,8 +391,15 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
384 391
385 /* Execute the argument AML */ 392 /* Execute the argument AML */
386 393
387 status = acpi_ds_execute_arguments(node, node->parent, 394 status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
388 extra_desc->extra.aml_length, 395 extra_desc->extra.aml_length,
389 extra_desc->extra.aml_start); 396 extra_desc->extra.aml_start);
397 if (ACPI_FAILURE(status)) {
398 return_ACPI_STATUS(status);
399 }
400
401 status = acpi_ut_add_address_range(obj_desc->region.space_id,
402 obj_desc->region.address,
403 obj_desc->region.length, node);
390 return_ACPI_STATUS(status); 404 return_ACPI_STATUS(status);
391} 405}
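
The acpi_ut_add_address_range() calls added above record each operation region's window (space ID, base address, length, owning node) once the region arguments have been evaluated, so the range can later be checked for overlap by other consumers. As an illustration only, a tracker with that behaviour could be as small as the sketch below; the structure and function names are invented for this example and are not the ACPICA implementation:

/*
 * Illustrative address-range tracker in the spirit of
 * acpi_ut_add_address_range()/acpi_ut_check_address_range().
 */
#include <stdlib.h>

struct range_entry {
	unsigned char space_id;
	unsigned long long address;
	unsigned int length;
	struct range_entry *next;
};

static struct range_entry *range_list;

static int range_add(unsigned char space_id,
		     unsigned long long address, unsigned int length)
{
	struct range_entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->space_id = space_id;
	e->address = address;
	e->length = length;
	e->next = range_list;	/* prepend to the global list */
	range_list = e;
	return 0;
}

static int range_overlaps(unsigned char space_id,
			  unsigned long long address, unsigned int length)
{
	struct range_entry *e;

	for (e = range_list; e; e = e->next) {
		if (e->space_id != space_id)
			continue;
		if (address < e->address + e->length &&
		    e->address < address + length)
			return 1;	/* ranges intersect */
	}
	return 0;
}
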
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 26c49fff58da..effe4ca1133f 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 34be60c0e448..cd243cf2cab2 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
221{ 221{
222 acpi_status status; 222 acpi_status status;
223 u64 position; 223 u64 position;
224 union acpi_parse_object *child;
224 225
225 ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info); 226 ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
226 227
@@ -232,10 +233,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
232 233
233 while (arg) { 234 while (arg) {
234 /* 235 /*
235 * Three types of field elements are handled: 236 * Four types of field elements are handled:
236 * 1) Offset - specifies a bit offset 237 * 1) Name - Enters a new named field into the namespace
237 * 2) access_as - changes the access mode 238 * 2) Offset - specifies a bit offset
238 * 3) Name - Enters a new named field into the namespace 239 * 3) access_as - changes the access mode/attributes
240 * 4) Connection - Associate a resource template with the field
239 */ 241 */
240 switch (arg->common.aml_opcode) { 242 switch (arg->common.aml_opcode) {
241 case AML_INT_RESERVEDFIELD_OP: 243 case AML_INT_RESERVEDFIELD_OP:
@@ -253,21 +255,70 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
253 break; 255 break;
254 256
255 case AML_INT_ACCESSFIELD_OP: 257 case AML_INT_ACCESSFIELD_OP:
256 258 case AML_INT_EXTACCESSFIELD_OP:
257 /* 259 /*
258 * Get a new access_type and access_attribute -- to be used for all 260 * Get new access_type, access_attribute, and access_length fields
259 * field units that follow, until field end or another access_as 261 * -- to be used for all field units that follow, until the
260 * keyword. 262 * end-of-field or another access_as keyword is encountered.
263 * NOTE. These three bytes are encoded in the integer value
264 * of the parseop for convenience.
261 * 265 *
262 * In field_flags, preserve the flag bits other than the 266 * In field_flags, preserve the flag bits other than the
263 * ACCESS_TYPE bits 267 * ACCESS_TYPE bits.
264 */ 268 */
269
270 /* access_type (byte_acc, word_acc, etc.) */
271
265 info->field_flags = (u8) 272 info->field_flags = (u8)
266 ((info-> 273 ((info->
267 field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) | 274 field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
268 ((u8) ((u32) arg->common.value.integer >> 8))); 275 ((u8)((u32)(arg->common.value.integer & 0x07))));
276
277 /* access_attribute (attrib_quick, attrib_byte, etc.) */
278
279 info->attribute =
280 (u8)((arg->common.value.integer >> 8) & 0xFF);
281
282 /* access_length (for serial/buffer protocols) */
283
284 info->access_length =
285 (u8)((arg->common.value.integer >> 16) & 0xFF);
286 break;
287
288 case AML_INT_CONNECTION_OP:
289 /*
290 * Clear any previous connection. New connection is used for all
291 * fields that follow, similar to access_as
292 */
293 info->resource_buffer = NULL;
294 info->connection_node = NULL;
269 295
270 info->attribute = (u8) (arg->common.value.integer); 296 /*
297 * A Connection() is either an actual resource descriptor (buffer)
298 * or a named reference to a resource template
299 */
300 child = arg->common.value.arg;
301 if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
302 info->resource_buffer = child->named.data;
303 info->resource_length =
304 (u16)child->named.value.integer;
305 } else {
306 /* Lookup the Connection() namepath, it should already exist */
307
308 status = acpi_ns_lookup(walk_state->scope_info,
309 child->common.value.
310 name, ACPI_TYPE_ANY,
311 ACPI_IMODE_EXECUTE,
312 ACPI_NS_DONT_OPEN_SCOPE,
313 walk_state,
314 &info->connection_node);
315 if (ACPI_FAILURE(status)) {
316 ACPI_ERROR_NAMESPACE(child->common.
317 value.name,
318 status);
319 return_ACPI_STATUS(status);
320 }
321 }
271 break; 322 break;
272 323
273 case AML_INT_NAMEDFIELD_OP: 324 case AML_INT_NAMEDFIELD_OP:
@@ -374,6 +425,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
374 } 425 }
375 } 426 }
376 427
428 ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
429
377 /* Second arg is the field flags */ 430 /* Second arg is the field flags */
378 431
379 arg = arg->common.next; 432 arg = arg->common.next;
@@ -386,7 +439,6 @@ acpi_ds_create_field(union acpi_parse_object *op,
386 info.region_node = region_node; 439 info.region_node = region_node;
387 440
388 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); 441 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
389
390 return_ACPI_STATUS(status); 442 return_ACPI_STATUS(status);
391} 443}
392 444
@@ -474,8 +526,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
474 */ 526 */
475 while (arg) { 527 while (arg) {
476 /* 528 /*
477 * Ignore OFFSET and ACCESSAS terms here; we are only interested in the 529 * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
478 * field names in order to enter them into the namespace. 530 * in the field names in order to enter them into the namespace.
479 */ 531 */
480 if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) { 532 if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
481 status = acpi_ns_lookup(walk_state->scope_info, 533 status = acpi_ns_lookup(walk_state->scope_info,
@@ -651,6 +703,5 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
651 info.region_node = region_node; 703 info.region_node = region_node;
652 704
653 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); 705 status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
654
655 return_ACPI_STATUS(status); 706 return_ACPI_STATUS(status);
656} 707}
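
The AML_INT_ACCESSFIELD_OP / AML_INT_EXTACCESSFIELD_OP case above relies on the parser having packed the three AccessAs() values into the parse op's integer: the access type in the low byte (only bits 0-2 are kept in field_flags), the access attribute in bits 8-15, and the access length in bits 16-23. A small self-contained sketch of that packing and unpacking, with hypothetical helper names:

/*
 * Sketch of the byte packing consumed by the AccessAs handling above.
 * pack_access()/unpack_*() are illustrative helpers, not ACPICA code.
 */
static unsigned long long pack_access(unsigned char access_type,
				      unsigned char access_attribute,
				      unsigned char access_length)
{
	return (unsigned long long)(access_type & 0x07) |
	       ((unsigned long long)access_attribute << 8) |
	       ((unsigned long long)access_length << 16);
}

static unsigned char unpack_access_type(unsigned long long value)
{
	return (unsigned char)(value & 0x07);		/* byte_acc, word_acc, ... */
}

static unsigned char unpack_access_attribute(unsigned long long value)
{
	return (unsigned char)((value >> 8) & 0xFF);	/* attrib_quick, attrib_byte, ... */
}

static unsigned char unpack_access_length(unsigned long long value)
{
	return (unsigned char)((value >> 16) & 0xFF);	/* serial/buffer protocol length */
}
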
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a7718bf2b9a1..9e5ac7f780a7 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 5d797751e205..00f5dab5bcc0 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 905ce29a92e1..b40bd507be5d 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index f42e17e5c252..d7045ca3e32a 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index c627a288e027..e5eff7585102 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 2c477ce172fa..1abcda31037f 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index fe40e4c6554f..642f3c053e87 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 324acec1179a..552aa3a50c84 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 976318138c56..ae7147724763 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 76a661fc1e09..9e9490a9cbf0 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index a6c374ef9914..c9c2ac13e7cc 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d458b041e651..6729ebe2f1e6 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,12 @@ acpi_status acpi_ev_initialize_events(void)
71 71
72 ACPI_FUNCTION_TRACE(ev_initialize_events); 72 ACPI_FUNCTION_TRACE(ev_initialize_events);
73 73
74 /* If Hardware Reduced flag is set, there are no fixed events */
75
76 if (acpi_gbl_reduced_hardware) {
77 return_ACPI_STATUS(AE_OK);
78 }
79
74 /* 80 /*
75 * Initialize the Fixed and General Purpose Events. This is done prior to 81 * Initialize the Fixed and General Purpose Events. This is done prior to
76 * enabling SCIs to prevent interrupts from occurring before the handlers 82 * enabling SCIs to prevent interrupts from occurring before the handlers
@@ -111,6 +117,12 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
111 117
112 ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers); 118 ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
113 119
120 /* If Hardware Reduced flag is set, there is no ACPI h/w */
121
122 if (acpi_gbl_reduced_hardware) {
123 return_ACPI_STATUS(AE_OK);
124 }
125
114 /* Install the SCI handler */ 126 /* Install the SCI handler */
115 127
116 status = acpi_ev_install_sci_handler(); 128 status = acpi_ev_install_sci_handler();
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 56a562a1e5d7..5e5683cb1f0d 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,12 @@ acpi_status acpi_ev_init_global_lock_handler(void)
70 70
71 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); 71 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
72 72
73 /* If Hardware Reduced flag is set, there is no global lock */
74
75 if (acpi_gbl_reduced_hardware) {
76 return_ACPI_STATUS(AE_OK);
77 }
78
73 /* Attempt installation of the global lock handler */ 79 /* Attempt installation of the global lock handler */
74 80
75 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, 81 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 65c79add3b19..9e88cb6fb25e 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index ca2c41a53311..be75339cd5dd 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index ce9aa9f9a972..adf7494da9db 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 80a81d0c4a80..25073932aa10 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d0b331844427..84966f416463 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index f0edf5c43c03..1b0180a1b798 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -329,6 +329,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
329 * FUNCTION: acpi_ev_address_space_dispatch 329 * FUNCTION: acpi_ev_address_space_dispatch
330 * 330 *
331 * PARAMETERS: region_obj - Internal region object 331 * PARAMETERS: region_obj - Internal region object
332 * field_obj - Corresponding field. Can be NULL.
332 * Function - Read or Write operation 333 * Function - Read or Write operation
333 * region_offset - Where in the region to read or write 334 * region_offset - Where in the region to read or write
334 * bit_width - Field width in bits (8, 16, 32, or 64) 335 * bit_width - Field width in bits (8, 16, 32, or 64)
@@ -344,6 +345,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
344 345
345acpi_status 346acpi_status
346acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, 347acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
348 union acpi_operand_object *field_obj,
347 u32 function, 349 u32 function,
348 u32 region_offset, u32 bit_width, u64 *value) 350 u32 region_offset, u32 bit_width, u64 *value)
349{ 351{
@@ -353,6 +355,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
353 union acpi_operand_object *handler_desc; 355 union acpi_operand_object *handler_desc;
354 union acpi_operand_object *region_obj2; 356 union acpi_operand_object *region_obj2;
355 void *region_context = NULL; 357 void *region_context = NULL;
358 struct acpi_connection_info *context;
356 359
357 ACPI_FUNCTION_TRACE(ev_address_space_dispatch); 360 ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
358 361
@@ -375,6 +378,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
375 return_ACPI_STATUS(AE_NOT_EXIST); 378 return_ACPI_STATUS(AE_NOT_EXIST);
376 } 379 }
377 380
381 context = handler_desc->address_space.context;
382
378 /* 383 /*
379 * It may be the case that the region has never been initialized. 384 * It may be the case that the region has never been initialized.
380 * Some types of regions require special init code 385 * Some types of regions require special init code
@@ -404,8 +409,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
404 acpi_ex_exit_interpreter(); 409 acpi_ex_exit_interpreter();
405 410
406 status = region_setup(region_obj, ACPI_REGION_ACTIVATE, 411 status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
407 handler_desc->address_space.context, 412 context, &region_context);
408 &region_context);
409 413
410 /* Re-enter the interpreter */ 414 /* Re-enter the interpreter */
411 415
@@ -455,6 +459,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
455 acpi_ut_get_region_name(region_obj->region. 459 acpi_ut_get_region_name(region_obj->region.
456 space_id))); 460 space_id)));
457 461
462 /*
463 * Special handling for generic_serial_bus and general_purpose_io:
464 * There are three extra parameters that must be passed to the
465 * handler via the context:
466 * 1) Connection buffer, a resource template from Connection() op.
467 * 2) Length of the above buffer.
468 * 3) Actual access length from the access_as() op.
469 */
470 if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
471 (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
472 context && field_obj) {
473
474 /* Get the Connection (resource_template) buffer */
475
476 context->connection = field_obj->field.resource_buffer;
477 context->length = field_obj->field.resource_length;
478 context->access_length = field_obj->field.access_length;
479 }
480
458 if (!(handler_desc->address_space.handler_flags & 481 if (!(handler_desc->address_space.handler_flags &
459 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { 482 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
460 /* 483 /*
@@ -469,7 +492,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
469 492
470 status = handler(function, 493 status = handler(function,
471 (region_obj->region.address + region_offset), 494 (region_obj->region.address + region_offset),
472 bit_width, value, handler_desc->address_space.context, 495 bit_width, value, context,
473 region_obj2->extra.region_context); 496 region_obj2->extra.region_context);
474 497
475 if (ACPI_FAILURE(status)) { 498 if (ACPI_FAILURE(status)) {
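
With the new field_obj parameter, the dispatch code above can hand the field's Connection() resource template and AccessAs() length to a GenericSerialBus or GeneralPurposeIo handler through struct acpi_connection_info (passed as the handler context). A minimal sketch of a handler consuming that context follows; my_gsbus_handler() is hypothetical, and only the context fields (connection, length, access_length) and the Function encoding (read/write in the low bit, protocol attribute in the upper 16 bits) are taken from the code above. Such a handler would typically be registered for ACPI_ADR_SPACE_GSBUS via acpi_install_address_space_handler().

/*
 * Illustrative GenericSerialBus region handler. The name and the empty
 * transfer body are placeholders; the context layout follows the
 * dispatch code above.
 */
#include <linux/acpi.h>

static acpi_status
my_gsbus_handler(u32 function, acpi_physical_address address,
		 u32 bit_width, u64 *value,
		 void *handler_context, void *region_context)
{
	struct acpi_connection_info *info = handler_context;
	u8 protocol = (function >> 16) & 0xFF;	/* AccessAs() attribute */
	u32 rw = function & ACPI_IO_MASK;	/* ACPI_READ or ACPI_WRITE */

	if (!info || !info->connection)
		return AE_ERROR;	/* field had no Connection() resource */

	/*
	 * info->connection/info->length hold the Connection() resource
	 * template (e.g. an I2cSerialBus descriptor) and its length;
	 * info->access_length is the AccessAs() access length. A real
	 * handler would parse the template and perform the transfer
	 * selected by 'protocol' and 'rw' on the buffer at 'value'.
	 */
	(void)address;
	(void)bit_width;
	(void)value;
	(void)rw;
	(void)protocol;

	return AE_OK;
}
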
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 55a5d35ef34a..819c17f5897a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 2ebd40e1a3ef..26065c612e76 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index f4f523bf5939..61944e89565a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 20516e599476..1768bbec1002 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f06a3ee356ba..33388fd69df4 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index aee887e3ca5c..6019208cd4b6 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 745a42b401f5..c86d44e41bc8 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
297 /* Bytewise reads */ 297 /* Bytewise reads */
298 298
299 for (i = 0; i < length; i++) { 299 for (i = 0; i < length; i++) {
300 status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ, 300 status =
301 region_offset, 8, 301 acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
302 &value); 302 region_offset, 8, &value);
303 if (ACPI_FAILURE(status)) { 303 if (ACPI_FAILURE(status)) {
304 return status; 304 return status;
305 } 305 }
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 74162a11817d..e385436bd424 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 110711afada8..3f5bc998c1cb 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
267 * 267 *
268 * PARAMETERS: aml_start - Pointer to the region declaration AML 268 * PARAMETERS: aml_start - Pointer to the region declaration AML
269 * aml_length - Max length of the declaration AML 269 * aml_length - Max length of the declaration AML
270 * region_space - space_iD for the region 270 * space_id - Address space ID for the region
271 * walk_state - Current state 271 * walk_state - Current state
272 * 272 *
273 * RETURN: Status 273 * RETURN: Status
@@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
279acpi_status 279acpi_status
280acpi_ex_create_region(u8 * aml_start, 280acpi_ex_create_region(u8 * aml_start,
281 u32 aml_length, 281 u32 aml_length,
282 u8 region_space, struct acpi_walk_state *walk_state) 282 u8 space_id, struct acpi_walk_state *walk_state)
283{ 283{
284 acpi_status status; 284 acpi_status status;
285 union acpi_operand_object *obj_desc; 285 union acpi_operand_object *obj_desc;
@@ -304,16 +304,19 @@ acpi_ex_create_region(u8 * aml_start,
304 * Space ID must be one of the predefined IDs, or in the user-defined 304 * Space ID must be one of the predefined IDs, or in the user-defined
305 * range 305 * range
306 */ 306 */
307 if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && 307 if (!acpi_is_valid_space_id(space_id)) {
308 (region_space < ACPI_USER_REGION_BEGIN) && 308 /*
309 (region_space != ACPI_ADR_SPACE_DATA_TABLE)) { 309 * Print an error message, but continue. We don't want to abort
310 ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X", 310 * a table load for this exception. Instead, if the region is
311 region_space)); 311 * actually used at runtime, abort the executing method.
312 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); 312 */
313 ACPI_ERROR((AE_INFO,
314 "Invalid/unknown Address Space ID: 0x%2.2X",
315 space_id));
313 } 316 }
314 317
315 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n", 318 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
316 acpi_ut_get_region_name(region_space), region_space)); 319 acpi_ut_get_region_name(space_id), space_id));
317 320
318 /* Create the region descriptor */ 321 /* Create the region descriptor */
319 322
@@ -330,10 +333,16 @@ acpi_ex_create_region(u8 * aml_start,
330 region_obj2 = obj_desc->common.next_object; 333 region_obj2 = obj_desc->common.next_object;
331 region_obj2->extra.aml_start = aml_start; 334 region_obj2->extra.aml_start = aml_start;
332 region_obj2->extra.aml_length = aml_length; 335 region_obj2->extra.aml_length = aml_length;
336 if (walk_state->scope_info) {
337 region_obj2->extra.scope_node =
338 walk_state->scope_info->scope.node;
339 } else {
340 region_obj2->extra.scope_node = node;
341 }
333 342
334 /* Init the region from the operands */ 343 /* Init the region from the operands */
335 344
336 obj_desc->region.space_id = region_space; 345 obj_desc->region.space_id = space_id;
337 obj_desc->region.address = 0; 346 obj_desc->region.address = 0;
338 obj_desc->region.length = 0; 347 obj_desc->region.length = 0;
339 obj_desc->region.node = node; 348 obj_desc->region.node = node;
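
acpi_ex_create_region() now defers to acpi_is_valid_space_id() and merely warns on an unknown ID instead of failing the table load; the method only faults later if the region is actually used. Judging from the condition removed above, the validity test amounts to roughly the sketch below (the real helper may accept further special-case IDs):

/*
 * Space-ID validity test reconstructed from the condition that was
 * open-coded here before; not necessarily identical to ACPICA's
 * acpi_is_valid_space_id().
 */
#include <linux/acpi.h>

static unsigned char space_id_is_valid(unsigned char space_id)
{
	if (space_id < ACPI_NUM_PREDEFINED_REGIONS)
		return 1;	/* SystemMemory, SystemIO, SMBus, GSBus, GPIO, ... */
	if (space_id >= ACPI_USER_REGION_BEGIN)
		return 1;	/* OEM/user-defined region types */
	if (space_id == ACPI_ADR_SPACE_DATA_TABLE)
		return 1;	/* DataTableRegion */
	return 0;
}
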
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index c7a2f1edd282..e211e9c19215 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 61b8c0e8b74d..2a6ac0a3bc1e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -192,10 +192,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
192 "Buffer Object"} 192 "Buffer Object"}
193}; 193};
194 194
195static struct acpi_exdump_info acpi_ex_dump_region_field[3] = { 195static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
196 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL}, 196 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
197 {ACPI_EXD_FIELD, 0, NULL}, 197 {ACPI_EXD_FIELD, 0, NULL},
198 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"} 198 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
199 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
200 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
201 "ResourceBuffer"}
199}; 202};
200 203
201static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = { 204static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 0bde2230c028..dc092f5b35d6 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -100,18 +100,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
100 (obj_desc->field.region_obj->region.space_id == 100 (obj_desc->field.region_obj->region.space_id ==
101 ACPI_ADR_SPACE_SMBUS 101 ACPI_ADR_SPACE_SMBUS
102 || obj_desc->field.region_obj->region.space_id == 102 || obj_desc->field.region_obj->region.space_id ==
103 ACPI_ADR_SPACE_GSBUS
104 || obj_desc->field.region_obj->region.space_id ==
103 ACPI_ADR_SPACE_IPMI)) { 105 ACPI_ADR_SPACE_IPMI)) {
104 /* 106 /*
105 * This is an SMBus or IPMI read. We must create a buffer to hold 107 * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
106 * the data and then directly access the region handler. 108 * the data and then directly access the region handler.
107 * 109 *
108 * Note: Smbus protocol value is passed in upper 16-bits of Function 110 * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
109 */ 111 */
110 if (obj_desc->field.region_obj->region.space_id == 112 if (obj_desc->field.region_obj->region.space_id ==
111 ACPI_ADR_SPACE_SMBUS) { 113 ACPI_ADR_SPACE_SMBUS) {
112 length = ACPI_SMBUS_BUFFER_SIZE; 114 length = ACPI_SMBUS_BUFFER_SIZE;
113 function = 115 function =
114 ACPI_READ | (obj_desc->field.attribute << 16); 116 ACPI_READ | (obj_desc->field.attribute << 16);
117 } else if (obj_desc->field.region_obj->region.space_id ==
118 ACPI_ADR_SPACE_GSBUS) {
119 length = ACPI_GSBUS_BUFFER_SIZE;
120 function =
121 ACPI_READ | (obj_desc->field.attribute << 16);
115 } else { /* IPMI */ 122 } else { /* IPMI */
116 123
117 length = ACPI_IPMI_BUFFER_SIZE; 124 length = ACPI_IPMI_BUFFER_SIZE;
@@ -248,21 +255,23 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
248 (obj_desc->field.region_obj->region.space_id == 255 (obj_desc->field.region_obj->region.space_id ==
249 ACPI_ADR_SPACE_SMBUS 256 ACPI_ADR_SPACE_SMBUS
250 || obj_desc->field.region_obj->region.space_id == 257 || obj_desc->field.region_obj->region.space_id ==
258 ACPI_ADR_SPACE_GSBUS
259 || obj_desc->field.region_obj->region.space_id ==
251 ACPI_ADR_SPACE_IPMI)) { 260 ACPI_ADR_SPACE_IPMI)) {
252 /* 261 /*
253 * This is an SMBus or IPMI write. We will bypass the entire field 262 * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
254 * mechanism and handoff the buffer directly to the handler. For 263 * mechanism and handoff the buffer directly to the handler. For
255 * these address spaces, the buffer is bi-directional; on a write, 264 * these address spaces, the buffer is bi-directional; on a write,
256 * return data is returned in the same buffer. 265 * return data is returned in the same buffer.
257 * 266 *
258 * Source must be a buffer of sufficient size: 267 * Source must be a buffer of sufficient size:
259 * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE. 268 * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
260 * 269 *
261 * Note: SMBus protocol type is passed in upper 16-bits of Function 270 * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
262 */ 271 */
263 if (source_desc->common.type != ACPI_TYPE_BUFFER) { 272 if (source_desc->common.type != ACPI_TYPE_BUFFER) {
264 ACPI_ERROR((AE_INFO, 273 ACPI_ERROR((AE_INFO,
265 "SMBus or IPMI write requires Buffer, found type %s", 274 "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
266 acpi_ut_get_object_type_name(source_desc))); 275 acpi_ut_get_object_type_name(source_desc)));
267 276
268 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 277 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -273,6 +282,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
273 length = ACPI_SMBUS_BUFFER_SIZE; 282 length = ACPI_SMBUS_BUFFER_SIZE;
274 function = 283 function =
275 ACPI_WRITE | (obj_desc->field.attribute << 16); 284 ACPI_WRITE | (obj_desc->field.attribute << 16);
285 } else if (obj_desc->field.region_obj->region.space_id ==
286 ACPI_ADR_SPACE_GSBUS) {
287 length = ACPI_GSBUS_BUFFER_SIZE;
288 function =
289 ACPI_WRITE | (obj_desc->field.attribute << 16);
276 } else { /* IPMI */ 290 } else { /* IPMI */
277 291
278 length = ACPI_IPMI_BUFFER_SIZE; 292 length = ACPI_IPMI_BUFFER_SIZE;
@@ -281,7 +295,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
281 295
282 if (source_desc->buffer.length < length) { 296 if (source_desc->buffer.length < length) {
283 ACPI_ERROR((AE_INFO, 297 ACPI_ERROR((AE_INFO,
284 "SMBus or IPMI write requires Buffer of length %u, found length %u", 298 "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
285 length, source_desc->buffer.length)); 299 length, source_desc->buffer.length));
286 300
287 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT); 301 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f915a7f3f921..149de45fdadd 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
86{ 86{
87 acpi_status status = AE_OK; 87 acpi_status status = AE_OK;
88 union acpi_operand_object *rgn_desc; 88 union acpi_operand_object *rgn_desc;
89 u8 space_id;
89 90
90 ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset); 91 ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
91 92
@@ -101,6 +102,17 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
101 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 102 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
102 } 103 }
103 104
105 space_id = rgn_desc->region.space_id;
106
107 /* Validate the Space ID */
108
109 if (!acpi_is_valid_space_id(space_id)) {
110 ACPI_ERROR((AE_INFO,
111 "Invalid/unknown Address Space ID: 0x%2.2X",
112 space_id));
113 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
114 }
115
104 /* 116 /*
105 * If the Region Address and Length have not been previously evaluated, 117 * If the Region Address and Length have not been previously evaluated,
106 * evaluate them now and save the results. 118 * evaluate them now and save the results.
@@ -119,11 +131,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
119 } 131 }
120 132
121 /* 133 /*
122 * Exit now for SMBus or IPMI address space, it has a non-linear 134 * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
123 * address space and the request cannot be directly validated 135 * address space and the request cannot be directly validated
124 */ 136 */
125 if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS || 137 if (space_id == ACPI_ADR_SPACE_SMBUS ||
126 rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) { 138 space_id == ACPI_ADR_SPACE_GSBUS ||
139 space_id == ACPI_ADR_SPACE_IPMI) {
127 140
128 /* SMBus or IPMI has a non-linear address space */ 141 /* SMBus or IPMI has a non-linear address space */
129 142
@@ -271,11 +284,12 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
271 284
272 /* Invoke the appropriate address_space/op_region handler */ 285 /* Invoke the appropriate address_space/op_region handler */
273 286
274 status = 287 status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
275 acpi_ev_address_space_dispatch(rgn_desc, function, region_offset, 288 function, region_offset,
276 ACPI_MUL_8(obj_desc->common_field. 289 ACPI_MUL_8(obj_desc->
277 access_byte_width), 290 common_field.
278 value); 291 access_byte_width),
292 value);
279 293
280 if (ACPI_FAILURE(status)) { 294 if (ACPI_FAILURE(status)) {
281 if (status == AE_NOT_IMPLEMENTED) { 295 if (status == AE_NOT_IMPLEMENTED) {
@@ -316,6 +330,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
316static u8 330static u8
317acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) 331acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
318{ 332{
333 ACPI_FUNCTION_NAME(ex_register_overflow);
319 334
320 if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) { 335 if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
321 /* 336 /*
@@ -330,6 +345,11 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
330 * The Value is larger than the maximum value that can fit into 345 * The Value is larger than the maximum value that can fit into
331 * the register. 346 * the register.
332 */ 347 */
348 ACPI_ERROR((AE_INFO,
349 "Index value 0x%8.8X%8.8X overflows field width 0x%X",
350 ACPI_FORMAT_UINT64(value),
351 obj_desc->common_field.bit_length));
352
333 return (TRUE); 353 return (TRUE);
334 } 354 }
335 355
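
The new error message above reports when an Index value is too large for the index register it is written to. The underlying test is a plain width check, roughly as sketched below (hypothetical helper, not the ACPICA function):

/*
 * Width check in the spirit of acpi_ex_register_overflow(): a value
 * overflows a field of 'bit_length' bits if it needs more bits than
 * the field provides.
 */
static int value_overflows_field(unsigned long long value,
				 unsigned int bit_length)
{
	if (bit_length >= 64)
		return 0;	/* any 64-bit value fits */

	return value >= (1ULL << bit_length);
}
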
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 703d88ed0b3d..0a0893310348 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index be1c56ead653..60933e9dc3c0 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 49ec049c157e..fcc75fa27d32 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 236ead14b7f7..9ba8c73cea16 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2571b4a310f4..879e8a277b94 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 1b48d9d28c9a..71fcc65c9ffa 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f4a2787e8e92..0786b8659061 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index cc95e2000406..30157f5a12d7 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
47#include "acinterp.h" 47#include "acinterp.h"
48#include "amlcode.h" 48#include "amlcode.h"
49#include "acnamesp.h" 49#include "acnamesp.h"
50#include "acdispat.h"
50 51
51#define _COMPONENT ACPI_EXECUTER 52#define _COMPONENT ACPI_EXECUTER
52ACPI_MODULE_NAME("exprep") 53ACPI_MODULE_NAME("exprep")
@@ -455,6 +456,30 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
455 obj_desc->field.region_obj = 456 obj_desc->field.region_obj =
456 acpi_ns_get_attached_object(info->region_node); 457 acpi_ns_get_attached_object(info->region_node);
457 458
459 /* Fields specific to generic_serial_bus fields */
460
461 obj_desc->field.access_length = info->access_length;
462
463 if (info->connection_node) {
464 second_desc = info->connection_node->object;
465 if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
466 status =
467 acpi_ds_get_buffer_arguments(second_desc);
468 if (ACPI_FAILURE(status)) {
469 acpi_ut_delete_object_desc(obj_desc);
470 return_ACPI_STATUS(status);
471 }
472 }
473
474 obj_desc->field.resource_buffer =
475 second_desc->buffer.pointer;
476 obj_desc->field.resource_length =
477 (u16)second_desc->buffer.length;
478 } else if (info->resource_buffer) {
479 obj_desc->field.resource_buffer = info->resource_buffer;
480 obj_desc->field.resource_length = info->resource_length;
481 }
482
458 /* Allow full data read from EC address space */ 483 /* Allow full data read from EC address space */
459 484
460 if ((obj_desc->field.region_obj->region.space_id == 485 if ((obj_desc->field.region_obj->region.space_id ==
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index f0d5e14f1f2c..12d51df6d3bf 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 55997e46948b..fa50e77e64a8 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index db502cd7d934..6e335dc34528 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index e3bb00ccdff5..a67b1d925ddd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index c0c8842dd344..c6cf843cc4c9 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index a979017d56b8..b35bed52e061 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2011, Intel Corp. 10 * Copyright (C) 2000 - 2012, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc665cc554de..65a45d8335c8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index df66e7b686be..191a12945226 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 8ad93146dd32..eb6798ba8b59 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -435,4 +435,29 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
435 } 435 }
436} 436}
437 437
438/*******************************************************************************
439 *
440 * FUNCTION: acpi_is_valid_space_id
441 *
442 * PARAMETERS: space_id - ID to be validated
443 *
444 * RETURN: TRUE if valid/supported ID.
445 *
 446 * DESCRIPTION: Validate an operation region space_id.
447 *
448 ******************************************************************************/
449
450u8 acpi_is_valid_space_id(u8 space_id)
451{
452
453 if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
454 (space_id < ACPI_USER_REGION_BEGIN) &&
455 (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
456 (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
457 return (FALSE);
458 }
459
460 return (TRUE);
461}
462
438#endif 463#endif
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index fc380d3d45ab..d21ec5f0b3a9 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f610d88a66be..1a6894afef79 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 050fd227951b..1455ddcdc32c 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index cc70f3fdcdd1..4ea4eeb51bfd 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
7 ******************************************************************************/ 7 ******************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2011, Intel Corp. 10 * Copyright (C) 2000 - 2012, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d52da3073650..3c4a922a9fc2 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 50d21c40b5c1..d4973d9da9f1 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 5f1605874655..6e5c43a60bb7 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
134 /* Supported widths are 8/16/32 */ 134 /* Supported widths are 8/16/32 */
135 135
136 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) { 136 if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
137 ACPI_ERROR((AE_INFO,
138 "Bad BitWidth parameter: %8.8X", bit_width));
137 return AE_BAD_PARAMETER; 139 return AE_BAD_PARAMETER;
138 } 140 }
139 141
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index d707756228c2..9d38eb6c0d0b 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d93172fd15a8..61623f3f6826 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1d0ef15d158f..7c3d3ceb98b3 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b683cc2ff9d3..b7f2b3be79ac 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 2ed294b7a4db..30ea5bc53a78 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index c1bd02b1a058..f375cb82e321 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index fd7c6380e294..9d84ec2f0211 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 5f7dc691c183..5cbf15ffe7d8 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d5fa520c3de5..b20e7c8c3ffb 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 3bb8bf105ea2..dd77a3ce6e50 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index b3234fa795b8..ec7ba2d3463c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index c845c8089f39..bbe46a447d34 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -620,6 +620,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
620 case ACPI_PTYPE2_FIXED: 620 case ACPI_PTYPE2_FIXED:
621 case ACPI_PTYPE2_MIN: 621 case ACPI_PTYPE2_MIN:
622 case ACPI_PTYPE2_COUNT: 622 case ACPI_PTYPE2_COUNT:
623 case ACPI_PTYPE2_FIX_VAR:
623 624
624 /* 625 /*
625 * These types all return a single Package that consists of a 626 * These types all return a single Package that consists of a
@@ -759,6 +760,34 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
759 } 760 }
760 break; 761 break;
761 762
763 case ACPI_PTYPE2_FIX_VAR:
764 /*
765 * Each subpackage has a fixed number of elements and an
766 * optional element
767 */
768 expected_count =
769 package->ret_info.count1 + package->ret_info.count2;
770 if (sub_package->package.count < expected_count) {
771 goto package_too_small;
772 }
773
774 status =
775 acpi_ns_check_package_elements(data, sub_elements,
776 package->ret_info.
777 object_type1,
778 package->ret_info.
779 count1,
780 package->ret_info.
781 object_type2,
782 sub_package->package.
783 count -
784 package->ret_info.
785 count1, 0);
786 if (ACPI_FAILURE(status)) {
787 return (status);
788 }
789 break;
790
762 case ACPI_PTYPE2_FIXED: 791 case ACPI_PTYPE2_FIXED:
763 792
764 /* Each sub-package has a fixed length */ 793 /* Each sub-package has a fixed length */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index ac7b854b0bd7..9c35d20eb52b 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -634,6 +634,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
634 case ACPI_PTYPE2_FIXED: 634 case ACPI_PTYPE2_FIXED:
635 case ACPI_PTYPE2_MIN: 635 case ACPI_PTYPE2_MIN:
636 case ACPI_PTYPE2_REV_FIXED: 636 case ACPI_PTYPE2_REV_FIXED:
637 case ACPI_PTYPE2_FIX_VAR:
637 break; 638 break;
638 639
639 default: 640 default:
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 024c4f263f87..726bc8e687f7 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -467,11 +467,12 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
467 } 467 }
468 468
469 /* 469 /*
470 * Copy and uppercase the string. From the ACPI specification: 470 * Copy and uppercase the string. From the ACPI 5.0 specification:
471 * 471 *
472 * A valid PNP ID must be of the form "AAA####" where A is an uppercase 472 * A valid PNP ID must be of the form "AAA####" where A is an uppercase
473 * letter and # is a hex digit. A valid ACPI ID must be of the form 473 * letter and # is a hex digit. A valid ACPI ID must be of the form
474 * "ACPI####" where # is a hex digit. 474 * "NNNN####" where N is an uppercase letter or decimal digit, and
475 * # is a hex digit.
475 */ 476 */
476 for (dest = new_string->string.pointer; *source; dest++, source++) { 477 for (dest = new_string->string.pointer; *source; dest++, source++) {
477 *dest = (char)ACPI_TOUPPER(*source); 478 *dest = (char)ACPI_TOUPPER(*source);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 28b0d7a62b99..507043d66114 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cb1b104a69a2..a535b7afda5c 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 345f0c3c6ad2..f69895a54895 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index e7f016d1b226..71d15f61807b 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83bf93024303..af401c9c4dfc 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 57e6d825ed84..880a605cee20 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e1fad0ee0136..5ac36aba507c 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -484,34 +484,54 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
484static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state 484static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
485 *parser_state) 485 *parser_state)
486{ 486{
487 u32 aml_offset = (u32) 487 u32 aml_offset;
488 ACPI_PTR_DIFF(parser_state->aml,
489 parser_state->aml_start);
490 union acpi_parse_object *field; 488 union acpi_parse_object *field;
489 union acpi_parse_object *arg = NULL;
491 u16 opcode; 490 u16 opcode;
492 u32 name; 491 u32 name;
492 u8 access_type;
493 u8 access_attribute;
494 u8 access_length;
495 u32 pkg_length;
496 u8 *pkg_end;
497 u32 buffer_length;
493 498
494 ACPI_FUNCTION_TRACE(ps_get_next_field); 499 ACPI_FUNCTION_TRACE(ps_get_next_field);
495 500
501 aml_offset =
502 (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
503
496 /* Determine field type */ 504 /* Determine field type */
497 505
498 switch (ACPI_GET8(parser_state->aml)) { 506 switch (ACPI_GET8(parser_state->aml)) {
499 default: 507 case AML_FIELD_OFFSET_OP:
500 508
501 opcode = AML_INT_NAMEDFIELD_OP; 509 opcode = AML_INT_RESERVEDFIELD_OP;
510 parser_state->aml++;
502 break; 511 break;
503 512
504 case 0x00: 513 case AML_FIELD_ACCESS_OP:
505 514
506 opcode = AML_INT_RESERVEDFIELD_OP; 515 opcode = AML_INT_ACCESSFIELD_OP;
507 parser_state->aml++; 516 parser_state->aml++;
508 break; 517 break;
509 518
510 case 0x01: 519 case AML_FIELD_CONNECTION_OP:
511 520
512 opcode = AML_INT_ACCESSFIELD_OP; 521 opcode = AML_INT_CONNECTION_OP;
522 parser_state->aml++;
523 break;
524
525 case AML_FIELD_EXT_ACCESS_OP:
526
527 opcode = AML_INT_EXTACCESSFIELD_OP;
513 parser_state->aml++; 528 parser_state->aml++;
514 break; 529 break;
530
531 default:
532
533 opcode = AML_INT_NAMEDFIELD_OP;
534 break;
515 } 535 }
516 536
517 /* Allocate a new field op */ 537 /* Allocate a new field op */
@@ -549,16 +569,111 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
549 break; 569 break;
550 570
551 case AML_INT_ACCESSFIELD_OP: 571 case AML_INT_ACCESSFIELD_OP:
572 case AML_INT_EXTACCESSFIELD_OP:
552 573
553 /* 574 /*
554 * Get access_type and access_attrib and merge into the field Op 575 * Get access_type and access_attrib and merge into the field Op
555 * access_type is first operand, access_attribute is second 576 * access_type is first operand, access_attribute is second. stuff
577 * these bytes into the node integer value for convenience.
556 */ 578 */
557 field->common.value.integer = 579
558 (((u32) ACPI_GET8(parser_state->aml) << 8)); 580 /* Get the two bytes (Type/Attribute) */
581
582 access_type = ACPI_GET8(parser_state->aml);
559 parser_state->aml++; 583 parser_state->aml++;
560 field->common.value.integer |= ACPI_GET8(parser_state->aml); 584 access_attribute = ACPI_GET8(parser_state->aml);
561 parser_state->aml++; 585 parser_state->aml++;
586
587 field->common.value.integer = (u8)access_type;
588 field->common.value.integer |= (u16)(access_attribute << 8);
589
590 /* This opcode has a third byte, access_length */
591
592 if (opcode == AML_INT_EXTACCESSFIELD_OP) {
593 access_length = ACPI_GET8(parser_state->aml);
594 parser_state->aml++;
595
596 field->common.value.integer |=
597 (u32)(access_length << 16);
598 }
599 break;
600
601 case AML_INT_CONNECTION_OP:
602
603 /*
604 * Argument for Connection operator can be either a Buffer
605 * (resource descriptor), or a name_string.
606 */
607 if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
608 parser_state->aml++;
609
610 pkg_end = parser_state->aml;
611 pkg_length =
612 acpi_ps_get_next_package_length(parser_state);
613 pkg_end += pkg_length;
614
615 if (parser_state->aml < pkg_end) {
616
617 /* Non-empty list */
618
619 arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
620 if (!arg) {
621 return_PTR(NULL);
622 }
623
624 /* Get the actual buffer length argument */
625
626 opcode = ACPI_GET8(parser_state->aml);
627 parser_state->aml++;
628
629 switch (opcode) {
630 case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
631 buffer_length =
632 ACPI_GET8(parser_state->aml);
633 parser_state->aml += 1;
634 break;
635
636 case AML_WORD_OP: /* AML_WORDDATA_ARG */
637 buffer_length =
638 ACPI_GET16(parser_state->aml);
639 parser_state->aml += 2;
640 break;
641
 642 case AML_DWORD_OP: /* AML_DWORDDATA_ARG */
643 buffer_length =
644 ACPI_GET32(parser_state->aml);
645 parser_state->aml += 4;
646 break;
647
648 default:
649 buffer_length = 0;
650 break;
651 }
652
653 /* Fill in bytelist data */
654
655 arg->named.value.size = buffer_length;
656 arg->named.data = parser_state->aml;
657 }
658
659 /* Skip to End of byte data */
660
661 parser_state->aml = pkg_end;
662 } else {
663 arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
664 if (!arg) {
665 return_PTR(NULL);
666 }
667
668 /* Get the Namestring argument */
669
670 arg->common.value.name =
671 acpi_ps_get_next_namestring(parser_state);
672 }
673
674 /* Link the buffer/namestring to parent (CONNECTION_OP) */
675
676 acpi_ps_append_arg(field, arg);
562 break; 677 break;
563 678
564 default: 679 default:
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 01dd70d1de51..9547ad8a620b 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index bed08de7528c..a0226fdcf75c 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -638,7 +638,16 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
638 638
639/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, 639/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
640 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, 640 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
641 AML_FLAGS_EXEC_0A_0T_1R) 641 AML_FLAGS_EXEC_0A_0T_1R),
642
643/* ACPI 5.0 opcodes */
644
645/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
646 ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
647 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
648/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
649 ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
650 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
642 651
643/*! [End] no source code translation !*/ 652/*! [End] no source code translation !*/
644}; 653};
@@ -657,7 +666,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
657/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 666/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
658/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX, 667/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
659/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D, 668/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
660/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 669/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
661/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, 670/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
662/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, 671/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
663/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, 672/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9bb0cbd37b5e..2ff9c35a1968 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a5faa1323a02..c872aa4b926e 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index f1464c03aa42..2b03cdbbe1c0 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,12 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
74 74
75 ACPI_FUNCTION_ENTRY(); 75 ACPI_FUNCTION_ENTRY();
76 76
77/*
78 if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
79 {
80 return (Op->Common.Value.Arg);
81 }
82*/
77 /* Get the info structure for this opcode */ 83 /* Get the info structure for this opcode */
78 84
79 op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); 85 op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 7eda78503422..13bb131ae125 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 3312d6368bf1..ab96cf47896d 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 8086805d4494..9d98c5ff66a5 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 9e66f9078426..a0305652394f 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 3a8a89ec2ca4..3c6df4b7eb2d 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -313,6 +313,38 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
313 resource_source)); 313 resource_source));
314 break; 314 break;
315 315
316 case ACPI_RESOURCE_TYPE_GPIO:
317
318 total_size =
319 (acpi_rs_length) (total_size +
320 (resource->data.gpio.
321 pin_table_length * 2) +
322 resource->data.gpio.
323 resource_source.string_length +
324 resource->data.gpio.
325 vendor_length);
326
327 break;
328
329 case ACPI_RESOURCE_TYPE_SERIAL_BUS:
330
331 total_size =
332 acpi_gbl_aml_resource_serial_bus_sizes[resource->
333 data.
334 common_serial_bus.
335 type];
336
337 total_size = (acpi_rs_length) (total_size +
338 resource->data.
339 i2c_serial_bus.
340 resource_source.
341 string_length +
342 resource->data.
343 i2c_serial_bus.
344 vendor_length);
345
346 break;
347
316 default: 348 default:
317 break; 349 break;
318 } 350 }
@@ -362,10 +394,11 @@ acpi_rs_get_list_length(u8 * aml_buffer,
362 u32 extra_struct_bytes; 394 u32 extra_struct_bytes;
363 u8 resource_index; 395 u8 resource_index;
364 u8 minimum_aml_resource_length; 396 u8 minimum_aml_resource_length;
397 union aml_resource *aml_resource;
365 398
366 ACPI_FUNCTION_TRACE(rs_get_list_length); 399 ACPI_FUNCTION_TRACE(rs_get_list_length);
367 400
368 *size_needed = 0; 401 *size_needed = ACPI_RS_SIZE_MIN; /* Minimum size is one end_tag */
369 end_aml = aml_buffer + aml_buffer_length; 402 end_aml = aml_buffer + aml_buffer_length;
370 403
371 /* Walk the list of AML resource descriptors */ 404 /* Walk the list of AML resource descriptors */
@@ -376,9 +409,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
376 409
377 status = acpi_ut_validate_resource(aml_buffer, &resource_index); 410 status = acpi_ut_validate_resource(aml_buffer, &resource_index);
378 if (ACPI_FAILURE(status)) { 411 if (ACPI_FAILURE(status)) {
412 /*
413 * Exit on failure. Cannot continue because the descriptor length
414 * may be bogus also.
415 */
379 return_ACPI_STATUS(status); 416 return_ACPI_STATUS(status);
380 } 417 }
381 418
419 aml_resource = (void *)aml_buffer;
420
382 /* Get the resource length and base (minimum) AML size */ 421 /* Get the resource length and base (minimum) AML size */
383 422
384 resource_length = acpi_ut_get_resource_length(aml_buffer); 423 resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -422,10 +461,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
422 461
423 case ACPI_RESOURCE_NAME_END_TAG: 462 case ACPI_RESOURCE_NAME_END_TAG:
424 /* 463 /*
425 * End Tag: 464 * End Tag: This is the normal exit
426 * This is the normal exit, add size of end_tag
427 */ 465 */
428 *size_needed += ACPI_RS_SIZE_MIN;
429 return_ACPI_STATUS(AE_OK); 466 return_ACPI_STATUS(AE_OK);
430 467
431 case ACPI_RESOURCE_NAME_ADDRESS32: 468 case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -457,6 +494,33 @@ acpi_rs_get_list_length(u8 * aml_buffer,
457 minimum_aml_resource_length); 494 minimum_aml_resource_length);
458 break; 495 break;
459 496
497 case ACPI_RESOURCE_NAME_GPIO:
498
499 /* Vendor data is optional */
500
501 if (aml_resource->gpio.vendor_length) {
502 extra_struct_bytes +=
503 aml_resource->gpio.vendor_offset -
504 aml_resource->gpio.pin_table_offset +
505 aml_resource->gpio.vendor_length;
506 } else {
507 extra_struct_bytes +=
508 aml_resource->large_header.resource_length +
509 sizeof(struct aml_resource_large_header) -
510 aml_resource->gpio.pin_table_offset;
511 }
512 break;
513
514 case ACPI_RESOURCE_NAME_SERIAL_BUS:
515
516 minimum_aml_resource_length =
517 acpi_gbl_resource_aml_serial_bus_sizes
518 [aml_resource->common_serial_bus.type];
519 extra_struct_bytes +=
520 aml_resource->common_serial_bus.resource_length -
521 minimum_aml_resource_length;
522 break;
523
460 default: 524 default:
461 break; 525 break;
462 } 526 }
@@ -467,9 +531,18 @@ acpi_rs_get_list_length(u8 * aml_buffer,
467 * Important: Round the size up for the appropriate alignment. This 531 * Important: Round the size up for the appropriate alignment. This
468 * is a requirement on IA64. 532 * is a requirement on IA64.
469 */ 533 */
470 buffer_size = acpi_gbl_resource_struct_sizes[resource_index] + 534 if (acpi_ut_get_resource_type(aml_buffer) ==
471 extra_struct_bytes; 535 ACPI_RESOURCE_NAME_SERIAL_BUS) {
472 buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size); 536 buffer_size =
537 acpi_gbl_resource_struct_serial_bus_sizes
538 [aml_resource->common_serial_bus.type] +
539 extra_struct_bytes;
540 } else {
541 buffer_size =
542 acpi_gbl_resource_struct_sizes[resource_index] +
543 extra_struct_bytes;
544 }
545 buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
473 546
474 *size_needed += buffer_size; 547 *size_needed += buffer_size;
475 548
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 4ce6e1147e80..46d6eb38ae66 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,70 @@ ACPI_MODULE_NAME("rscreate")
51 51
52/******************************************************************************* 52/*******************************************************************************
53 * 53 *
54 * FUNCTION: acpi_buffer_to_resource
55 *
56 * PARAMETERS: aml_buffer - Pointer to the resource byte stream
57 * aml_buffer_length - Length of the aml_buffer
58 * resource_ptr - Where the converted resource is returned
59 *
60 * RETURN: Status
61 *
62 * DESCRIPTION: Convert a raw AML buffer to a resource list
63 *
64 ******************************************************************************/
65acpi_status
66acpi_buffer_to_resource(u8 *aml_buffer,
67 u16 aml_buffer_length,
68 struct acpi_resource **resource_ptr)
69{
70 acpi_status status;
71 acpi_size list_size_needed;
72 void *resource;
73 void *current_resource_ptr;
74
75 /*
76 * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
77 * is not required here.
78 */
79
80 /* Get the required length for the converted resource */
81
82 status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
83 &list_size_needed);
84 if (status == AE_AML_NO_RESOURCE_END_TAG) {
85 status = AE_OK;
86 }
87 if (ACPI_FAILURE(status)) {
88 return (status);
89 }
90
91 /* Allocate a buffer for the converted resource */
92
93 resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
94 current_resource_ptr = resource;
95 if (!resource) {
96 return (AE_NO_MEMORY);
97 }
98
99 /* Perform the AML-to-Resource conversion */
100
101 status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
102 acpi_rs_convert_aml_to_resources,
103 &current_resource_ptr);
104 if (status == AE_AML_NO_RESOURCE_END_TAG) {
105 status = AE_OK;
106 }
107 if (ACPI_FAILURE(status)) {
108 ACPI_FREE(resource);
109 } else {
110 *resource_ptr = resource;
111 }
112
113 return (status);
114}
115
116/*******************************************************************************
117 *
54 * FUNCTION: acpi_rs_create_resource_list 118 * FUNCTION: acpi_rs_create_resource_list
55 * 119 *
56 * PARAMETERS: aml_buffer - Pointer to the resource byte stream 120 * PARAMETERS: aml_buffer - Pointer to the resource byte stream
@@ -66,9 +130,10 @@ ACPI_MODULE_NAME("rscreate")
66 * of device resources. 130 * of device resources.
67 * 131 *
68 ******************************************************************************/ 132 ******************************************************************************/
133
69acpi_status 134acpi_status
70acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer, 135acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
71 struct acpi_buffer *output_buffer) 136 struct acpi_buffer * output_buffer)
72{ 137{
73 138
74 acpi_status status; 139 acpi_status status;
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 33db7520c74b..b4c581132393 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,11 +61,13 @@ static void acpi_rs_out_integer64(char *title, u64 value);
61 61
62static void acpi_rs_out_title(char *title); 62static void acpi_rs_out_title(char *title);
63 63
64static void acpi_rs_dump_byte_list(u16 length, u8 * data); 64static void acpi_rs_dump_byte_list(u16 length, u8 *data);
65 65
66static void acpi_rs_dump_dword_list(u8 length, u32 * data); 66static void acpi_rs_dump_word_list(u16 length, u16 *data);
67 67
68static void acpi_rs_dump_short_byte_list(u8 length, u8 * data); 68static void acpi_rs_dump_dword_list(u8 length, u32 *data);
69
70static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
69 71
70static void 72static void
71acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source); 73acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -309,6 +311,125 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
309 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL} 311 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
310}; 312};
311 313
314struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
315 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
316 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
317 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
318 "ConnectionType", acpi_gbl_ct_decode},
319 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
320 "ProducerConsumer", acpi_gbl_consume_decode},
321 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
322 acpi_gbl_ppc_decode},
323 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
324 acpi_gbl_shr_decode},
325 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
326 "IoRestriction", acpi_gbl_ior_decode},
327 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
328 acpi_gbl_he_decode},
329 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
330 acpi_gbl_ll_decode},
331 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
332 NULL},
333 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
334 "DebounceTimeout", NULL},
335 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
336 "ResourceSource", NULL},
337 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
338 "PinTableLength", NULL},
339 {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
340 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
341 NULL},
342 {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
343 NULL},
344};
345
346struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
347 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
348 "FixedDma", NULL},
349 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
350 "RequestLines", NULL},
351 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
352 NULL},
353 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
354 acpi_gbl_dts_decode},
355};
356
357#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
358 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
359 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
360 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
361 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
362 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
363 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
364 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
365 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
366 {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
367
368struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
369 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
370 "Common Serial Bus", NULL},
371 ACPI_RS_DUMP_COMMON_SERIAL_BUS
372};
373
374struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
375 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
376 "I2C Serial Bus", NULL},
377 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
378 ACPI_RSD_OFFSET(i2c_serial_bus.
379 access_mode),
380 "AccessMode", acpi_gbl_am_decode},
381 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
382 "ConnectionSpeed", NULL},
383 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
384 "SlaveAddress", NULL},
385};
386
387struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
388 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
389 "Spi Serial Bus", NULL},
390 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
391 ACPI_RSD_OFFSET(spi_serial_bus.
392 wire_mode), "WireMode",
393 acpi_gbl_wm_decode},
394 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
395 "DevicePolarity", acpi_gbl_dp_decode},
396 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
397 "DataBitLength", NULL},
398 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
399 "ClockPhase", acpi_gbl_cph_decode},
400 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
401 "ClockPolarity", acpi_gbl_cpo_decode},
402 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
403 "DeviceSelection", NULL},
404 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
405 "ConnectionSpeed", NULL},
406};
407
408struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
409 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
410 "Uart Serial Bus", NULL},
411 ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
412 ACPI_RSD_OFFSET(uart_serial_bus.
413 flow_control),
414 "FlowControl", acpi_gbl_fc_decode},
415 {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
416 "StopBits", acpi_gbl_sb_decode},
417 {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
418 "DataBits", acpi_gbl_bpb_decode},
419 {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
420 acpi_gbl_ed_decode},
421 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
422 acpi_gbl_pt_decode},
423 {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
424 "LinesEnabled", NULL},
425 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
426 "RxFifoSize", NULL},
427 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
428 "TxFifoSize", NULL},
429 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
430 "ConnectionSpeed", NULL},
431};
432
312/* 433/*
313 * Tables used for common address descriptor flag fields 434 * Tables used for common address descriptor flag fields
314 */ 435 */
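A quick size check on the serial-bus dump tables added above: each one is the title entry plus the nine entries expanded from ACPI_RS_DUMP_COMMON_SERIAL_BUS plus the bus-specific fields, so I2C is 1 + 9 + 3 = 13, SPI is 1 + 9 + 7 = 17, and UART is 1 + 9 + 9 = 19, matching the declared array dimensions.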
@@ -413,7 +534,14 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
413 /* Data items, 8/16/32/64 bit */ 534 /* Data items, 8/16/32/64 bit */
414 535
415 case ACPI_RSD_UINT8: 536 case ACPI_RSD_UINT8:
416 acpi_rs_out_integer8(name, ACPI_GET8(target)); 537 if (table->pointer) {
538 acpi_rs_out_string(name, ACPI_CAST_PTR(char,
539 table->
540 pointer
541 [*target]));
542 } else {
543 acpi_rs_out_integer8(name, ACPI_GET8(target));
544 }
417 break; 545 break;
418 546
419 case ACPI_RSD_UINT16: 547 case ACPI_RSD_UINT16:
@@ -444,6 +572,13 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
444 0x03])); 572 0x03]));
445 break; 573 break;
446 574
575 case ACPI_RSD_3BITFLAG:
576 acpi_rs_out_string(name, ACPI_CAST_PTR(char,
577 table->
578 pointer[*target &
579 0x07]));
580 break;
581
447 case ACPI_RSD_SHORTLIST: 582 case ACPI_RSD_SHORTLIST:
448 /* 583 /*
449 * Short byte list (single line output) for DMA and IRQ resources 584 * Short byte list (single line output) for DMA and IRQ resources
@@ -456,6 +591,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
456 } 591 }
457 break; 592 break;
458 593
594 case ACPI_RSD_SHORTLISTX:
595 /*
596 * Short byte list (single line output) for GPIO vendor data
597 * Note: The list length is obtained from the previous table entry
598 */
599 if (previous_target) {
600 acpi_rs_out_title(name);
601 acpi_rs_dump_short_byte_list(*previous_target,
602 *
603 (ACPI_CAST_INDIRECT_PTR
604 (u8, target)));
605 }
606 break;
607
459 case ACPI_RSD_LONGLIST: 608 case ACPI_RSD_LONGLIST:
460 /* 609 /*
461 * Long byte list for Vendor resource data 610 * Long byte list for Vendor resource data
@@ -480,6 +629,18 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
480 } 629 }
481 break; 630 break;
482 631
632 case ACPI_RSD_WORDLIST:
633 /*
634 * Word list for GPIO Pin Table
635 * Note: The list length is obtained from the previous table entry
636 */
637 if (previous_target) {
638 acpi_rs_dump_word_list(*previous_target,
639 *(ACPI_CAST_INDIRECT_PTR
640 (u16, target)));
641 }
642 break;
643
483 case ACPI_RSD_ADDRESS: 644 case ACPI_RSD_ADDRESS:
484 /* 645 /*
485 * Common flags for all Address resources 646 * Common flags for all Address resources
@@ -627,14 +788,20 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
627 788
628 /* Dump the resource descriptor */ 789 /* Dump the resource descriptor */
629 790
630 acpi_rs_dump_descriptor(&resource_list->data, 791 if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
631 acpi_gbl_dump_resource_dispatch[type]); 792 acpi_rs_dump_descriptor(&resource_list->data,
793 acpi_gbl_dump_serial_bus_dispatch
794 [resource_list->data.
795 common_serial_bus.type]);
796 } else {
797 acpi_rs_dump_descriptor(&resource_list->data,
798 acpi_gbl_dump_resource_dispatch
799 [type]);
800 }
632 801
633 /* Point to the next resource structure */ 802 /* Point to the next resource structure */
634 803
635 resource_list = 804 resource_list = ACPI_NEXT_RESOURCE(resource_list);
636 ACPI_ADD_PTR(struct acpi_resource, resource_list,
637 resource_list->length);
638 805
639 /* Exit when END_TAG descriptor is reached */ 806 /* Exit when END_TAG descriptor is reached */
640 807
@@ -768,4 +935,13 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data)
768 } 935 }
769} 936}
770 937
938static void acpi_rs_dump_word_list(u16 length, u16 *data)
939{
940 u16 i;
941
942 for (i = 0; i < length; i++) {
943 acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
944 }
945}
946
771#endif 947#endif
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index f9ea60872aa4..a9fa5158200b 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -76,7 +76,10 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
76 acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */ 76 acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
77 acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ 77 acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
78 acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ 78 acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
79 acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ 79 acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
80 acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */
81 acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
82 NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
80}; 83};
81 84
82/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */ 85/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,7 +97,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
94 acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */ 97 acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
95 acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */ 98 acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */
96 acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */ 99 acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
97 NULL, /* 0x0A, Reserved */ 100 acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
98 NULL, /* 0x0B, Reserved */ 101 NULL, /* 0x0B, Reserved */
99 NULL, /* 0x0C, Reserved */ 102 NULL, /* 0x0C, Reserved */
100 NULL, /* 0x0D, Reserved */ 103 NULL, /* 0x0D, Reserved */
@@ -114,7 +117,19 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
114 acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */ 117 acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
115 acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */ 118 acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
116 acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */ 119 acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
117 acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */ 120 acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
121 acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */
122 NULL, /* 0x0D, Reserved */
123 NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
124};
125
126/* Subtype table for serial_bus -- I2C, SPI, and UART */
127
128struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
129 NULL,
130 acpi_rs_convert_i2c_serial_bus,
131 acpi_rs_convert_spi_serial_bus,
132 acpi_rs_convert_uart_serial_bus,
118}; 133};
119 134
120#ifdef ACPI_FUTURE_USAGE 135#ifdef ACPI_FUTURE_USAGE
@@ -140,6 +155,16 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
140 acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ 155 acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
141 acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ 156 acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
142 acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ 157 acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
158 acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */
159 acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */
160 NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
161};
162
163struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
164 NULL,
165 acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */
166 acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */
167 acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */
143}; 168};
144#endif 169#endif
145 170
@@ -166,7 +191,10 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
166 sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */ 191 sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */
167 sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ 192 sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
168 sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ 193 sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
169 sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ 194 sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
195 sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */
196 sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */
197 sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
170}; 198};
171 199
172const u8 acpi_gbl_resource_struct_sizes[] = { 200const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -182,7 +210,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
182 ACPI_RS_SIZE_MIN, 210 ACPI_RS_SIZE_MIN,
183 ACPI_RS_SIZE(struct acpi_resource_io), 211 ACPI_RS_SIZE(struct acpi_resource_io),
184 ACPI_RS_SIZE(struct acpi_resource_fixed_io), 212 ACPI_RS_SIZE(struct acpi_resource_fixed_io),
185 0, 213 ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
186 0, 214 0,
187 0, 215 0,
188 0, 216 0,
@@ -202,5 +230,21 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
202 ACPI_RS_SIZE(struct acpi_resource_address16), 230 ACPI_RS_SIZE(struct acpi_resource_address16),
203 ACPI_RS_SIZE(struct acpi_resource_extended_irq), 231 ACPI_RS_SIZE(struct acpi_resource_extended_irq),
204 ACPI_RS_SIZE(struct acpi_resource_address64), 232 ACPI_RS_SIZE(struct acpi_resource_address64),
205 ACPI_RS_SIZE(struct acpi_resource_extended_address64) 233 ACPI_RS_SIZE(struct acpi_resource_extended_address64),
234 ACPI_RS_SIZE(struct acpi_resource_gpio),
235 ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
236};
237
238const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
239 0,
240 sizeof(struct aml_resource_i2c_serialbus),
241 sizeof(struct aml_resource_spi_serialbus),
242 sizeof(struct aml_resource_uart_serialbus),
243};
244
245const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
246 0,
247 ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
248 ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
249 ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
206}; 250};
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 0c7efef008be..f6a081057a22 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 50b8ad211167..e23a9ec248cb 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -264,3 +264,34 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
264 AML_OFFSET(dma.dma_channel_mask), 264 AML_OFFSET(dma.dma_channel_mask),
265 ACPI_RS_OFFSET(data.dma.channel_count)} 265 ACPI_RS_OFFSET(data.dma.channel_count)}
266}; 266};
267
268/*******************************************************************************
269 *
270 * acpi_rs_convert_fixed_dma
271 *
272 ******************************************************************************/
273
274struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
275 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
276 ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
277 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
278
279 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
280 sizeof(struct aml_resource_fixed_dma),
281 0},
282
283 /*
284 * These fields are contiguous in both the source and destination:
285 * request_lines
286 * Channels
287 */
288
289 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
290 AML_OFFSET(fixed_dma.request_lines),
291 2},
292
293 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
294 AML_OFFSET(fixed_dma.width),
295 1},
296
297};
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 1bfcef736c50..9be129f5d6f4 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
70 struct acpi_resource **resource_ptr = 70 struct acpi_resource **resource_ptr =
71 ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context); 71 ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
72 struct acpi_resource *resource; 72 struct acpi_resource *resource;
73 union aml_resource *aml_resource;
74 struct acpi_rsconvert_info *conversion_table;
73 acpi_status status; 75 acpi_status status;
74 76
75 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources); 77 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -84,14 +86,37 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
84 "Misaligned resource pointer %p", resource)); 86 "Misaligned resource pointer %p", resource));
85 } 87 }
86 88
89 /* Get the appropriate conversion info table */
90
91 aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
92 if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
93 if (aml_resource->common_serial_bus.type >
94 AML_RESOURCE_MAX_SERIALBUSTYPE) {
95 conversion_table = NULL;
96 } else {
97 /* This is an I2C, SPI, or UART serial_bus descriptor */
98
99 conversion_table =
100 acpi_gbl_convert_resource_serial_bus_dispatch
101 [aml_resource->common_serial_bus.type];
102 }
103 } else {
104 conversion_table =
105 acpi_gbl_get_resource_dispatch[resource_index];
106 }
107
108 if (!conversion_table) {
109 ACPI_ERROR((AE_INFO,
110 "Invalid/unsupported resource descriptor: Type 0x%2.2X",
111 resource_index));
112 return (AE_AML_INVALID_RESOURCE_TYPE);
113 }
114
87 /* Convert the AML byte stream resource to a local resource struct */ 115 /* Convert the AML byte stream resource to a local resource struct */
88 116
89 status = 117 status =
90 acpi_rs_convert_aml_to_resource(resource, 118 acpi_rs_convert_aml_to_resource(resource, aml_resource,
91 ACPI_CAST_PTR(union aml_resource, 119 conversion_table);
92 aml),
93 acpi_gbl_get_resource_dispatch
94 [resource_index]);
95 if (ACPI_FAILURE(status)) { 120 if (ACPI_FAILURE(status)) {
96 ACPI_EXCEPTION((AE_INFO, status, 121 ACPI_EXCEPTION((AE_INFO, status,
97 "Could not convert AML resource (Type 0x%X)", 122 "Could not convert AML resource (Type 0x%X)",
@@ -106,7 +131,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
106 131
107 /* Point to the next structure in the output buffer */ 132 /* Point to the next structure in the output buffer */
108 133
109 *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length); 134 *resource_ptr = ACPI_NEXT_RESOURCE(resource);
110 return_ACPI_STATUS(AE_OK); 135 return_ACPI_STATUS(AE_OK);
111} 136}
112 137
@@ -135,6 +160,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
135{ 160{
136 u8 *aml = output_buffer; 161 u8 *aml = output_buffer;
137 u8 *end_aml = output_buffer + aml_size_needed; 162 u8 *end_aml = output_buffer + aml_size_needed;
163 struct acpi_rsconvert_info *conversion_table;
138 acpi_status status; 164 acpi_status status;
139 165
140 ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml); 166 ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -154,11 +180,34 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
154 180
155 /* Perform the conversion */ 181 /* Perform the conversion */
156 182
157 status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union 183 if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
158 aml_resource, 184 if (resource->data.common_serial_bus.type >
159 aml), 185 AML_RESOURCE_MAX_SERIALBUSTYPE) {
160 acpi_gbl_set_resource_dispatch 186 conversion_table = NULL;
161 [resource->type]); 187 } else {
188 /* This is an I2C, SPI, or UART serial_bus descriptor */
189
190 conversion_table =
191 acpi_gbl_convert_resource_serial_bus_dispatch
192 [resource->data.common_serial_bus.type];
193 }
194 } else {
195 conversion_table =
196 acpi_gbl_set_resource_dispatch[resource->type];
197 }
198
199 if (!conversion_table) {
200 ACPI_ERROR((AE_INFO,
201 "Invalid/unsupported resource descriptor: Type 0x%2.2X",
202 resource->type));
203 return (AE_AML_INVALID_RESOURCE_TYPE);
204 }
205
206 status = acpi_rs_convert_resource_to_aml(resource,
207 ACPI_CAST_PTR(union
208 aml_resource,
209 aml),
210 conversion_table);
162 if (ACPI_FAILURE(status)) { 211 if (ACPI_FAILURE(status)) {
163 ACPI_EXCEPTION((AE_INFO, status, 212 ACPI_EXCEPTION((AE_INFO, status,
164 "Could not convert resource (type 0x%X) to AML", 213 "Could not convert resource (type 0x%X) to AML",
@@ -192,9 +241,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
192 241
193 /* Point to the next input resource descriptor */ 242 /* Point to the next input resource descriptor */
194 243
195 resource = 244 resource = ACPI_NEXT_RESOURCE(resource);
196 ACPI_ADD_PTR(struct acpi_resource, resource,
197 resource->length);
198 } 245 }
199 246
200 /* Completed buffer, but did not find an end_tag resource descriptor */ 247 /* Completed buffer, but did not find an end_tag resource descriptor */
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 7cc6d8625f1e..4fd611ad02b4 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 410264b22a29..8073b371cc7c 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
83 83
84 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource); 84 ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
85 85
86 if (!info) {
87 return_ACPI_STATUS(AE_BAD_PARAMETER);
88 }
89
86 if (((acpi_size) resource) & 0x3) { 90 if (((acpi_size) resource) & 0x3) {
87 91
88 /* Each internal resource struct is expected to be 32-bit aligned */ 92 /* Each internal resource struct is expected to be 32-bit aligned */
@@ -101,7 +105,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
101 * table length (# of table entries) 105 * table length (# of table entries)
102 */ 106 */
103 count = INIT_TABLE_LENGTH(info); 107 count = INIT_TABLE_LENGTH(info);
104
105 while (count) { 108 while (count) {
106 /* 109 /*
107 * Source is the external AML byte stream buffer, 110 * Source is the external AML byte stream buffer,
@@ -145,6 +148,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
145 ((ACPI_GET8(source) >> info->value) & 0x03); 148 ((ACPI_GET8(source) >> info->value) & 0x03);
146 break; 149 break;
147 150
151 case ACPI_RSC_3BITFLAG:
152 /*
153 * Mask and shift the flag bits
154 */
155 ACPI_SET8(destination) = (u8)
156 ((ACPI_GET8(source) >> info->value) & 0x07);
157 break;
158
148 case ACPI_RSC_COUNT: 159 case ACPI_RSC_COUNT:
149 160
150 item_count = ACPI_GET8(source); 161 item_count = ACPI_GET8(source);
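The new ACPI_RSC_3BITFLAG opcode above extends the existing 1-bit and 2-bit flag handling to three-bit fields such as the UART DataBits setting (placed at bit 4 of the type-specific flags by the conversion table added later in rsserial.c). A minimal sketch of the two directions, assuming a field at bit position pos:

#include <stdint.h>

/* AML -> resource (get path): shift the flags byte down and keep three bits */
static uint8_t get_3bit_flag(uint8_t aml_flags, unsigned int pos)
{
	return (uint8_t)((aml_flags >> pos) & 0x07);
}

/* resource -> AML (set path): merge three bits back into the flags byte */
static void set_3bit_flag(uint8_t *aml_flags, uint8_t value, unsigned int pos)
{
	*aml_flags |= (uint8_t)((value & 0x07) << pos);
}

int main(void)
{
	uint8_t flags = 0;

	set_3bit_flag(&flags, 0x3, 4);                  /* e.g. a DataBits value of 3 at bit 4 */
	return get_3bit_flag(flags, 4) == 0x3 ? 0 : 1;  /* round-trips cleanly */
}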
@@ -163,6 +174,69 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
163 (info->value * (item_count - 1)); 174 (info->value * (item_count - 1));
164 break; 175 break;
165 176
177 case ACPI_RSC_COUNT_GPIO_PIN:
178
179 target = ACPI_ADD_PTR(void, aml, info->value);
180 item_count = ACPI_GET16(target) - ACPI_GET16(source);
181
182 resource->length = resource->length + item_count;
183 item_count = item_count / 2;
184 ACPI_SET16(destination) = item_count;
185 break;
186
187 case ACPI_RSC_COUNT_GPIO_VEN:
188
189 item_count = ACPI_GET8(source);
190 ACPI_SET8(destination) = (u8)item_count;
191
192 resource->length = resource->length +
193 (info->value * item_count);
194 break;
195
196 case ACPI_RSC_COUNT_GPIO_RES:
197
198 /*
199 * Vendor data is optional (length/offset may both be zero)
200 * Examine vendor data length field first
201 */
202 target = ACPI_ADD_PTR(void, aml, (info->value + 2));
203 if (ACPI_GET16(target)) {
204
205 /* Use vendor offset to get resource source length */
206
207 target = ACPI_ADD_PTR(void, aml, info->value);
208 item_count =
209 ACPI_GET16(target) - ACPI_GET16(source);
210 } else {
211 /* No vendor data to worry about */
212
213 item_count = aml->large_header.resource_length +
214 sizeof(struct aml_resource_large_header) -
215 ACPI_GET16(source);
216 }
217
218 resource->length = resource->length + item_count;
219 ACPI_SET16(destination) = item_count;
220 break;
221
222 case ACPI_RSC_COUNT_SERIAL_VEN:
223
224 item_count = ACPI_GET16(source) - info->value;
225
226 resource->length = resource->length + item_count;
227 ACPI_SET16(destination) = item_count;
228 break;
229
230 case ACPI_RSC_COUNT_SERIAL_RES:
231
232 item_count = (aml_resource_length +
233 sizeof(struct aml_resource_large_header))
234 - ACPI_GET16(source) - info->value;
235
236 resource->length = resource->length + item_count;
237 ACPI_SET16(destination) = item_count;
238 break;
239
166 case ACPI_RSC_LENGTH: 240 case ACPI_RSC_LENGTH:
167 241
168 resource->length = resource->length + info->value; 242 resource->length = resource->length + info->value;
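To make the ACPI_RSC_COUNT_GPIO_PIN arithmetic above concrete with hypothetical offsets: if a GPIO descriptor carries a pin-table offset of 0x0017 and a resource-source offset of 0x001B, the pin table occupies 0x001B - 0x0017 = 4 bytes, so the internal resource length grows by 4 and the stored pin count becomes 4 / 2 = 2, since each pin-table entry is a 16-bit pin number.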
@@ -183,6 +257,72 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
183 info->opcode); 257 info->opcode);
184 break; 258 break;
185 259
260 case ACPI_RSC_MOVE_GPIO_PIN:
261
262 /* Generate and set the PIN data pointer */
263
264 target = (char *)ACPI_ADD_PTR(void, resource,
265 (resource->length -
266 item_count * 2));
267 *(u16 **)destination = ACPI_CAST_PTR(u16, target);
268
269 /* Copy the PIN data */
270
271 source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
272 acpi_rs_move_data(target, source, item_count,
273 info->opcode);
274 break;
275
276 case ACPI_RSC_MOVE_GPIO_RES:
277
278 /* Generate and set the resource_source string pointer */
279
280 target = (char *)ACPI_ADD_PTR(void, resource,
281 (resource->length -
282 item_count));
283 *(u8 **)destination = ACPI_CAST_PTR(u8, target);
284
285 /* Copy the resource_source string */
286
287 source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
288 acpi_rs_move_data(target, source, item_count,
289 info->opcode);
290 break;
291
292 case ACPI_RSC_MOVE_SERIAL_VEN:
293
294 /* Generate and set the Vendor Data pointer */
295
296 target = (char *)ACPI_ADD_PTR(void, resource,
297 (resource->length -
298 item_count));
299 *(u8 **)destination = ACPI_CAST_PTR(u8, target);
300
301 /* Copy the Vendor Data */
302
303 source = ACPI_ADD_PTR(void, aml, info->value);
304 acpi_rs_move_data(target, source, item_count,
305 info->opcode);
306 break;
307
308 case ACPI_RSC_MOVE_SERIAL_RES:
309
310 /* Generate and set the resource_source string pointer */
311
312 target = (char *)ACPI_ADD_PTR(void, resource,
313 (resource->length -
314 item_count));
315 *(u8 **)destination = ACPI_CAST_PTR(u8, target);
316
317 /* Copy the resource_source string */
318
319 source =
320 ACPI_ADD_PTR(void, aml,
321 (ACPI_GET16(source) + info->value));
322 acpi_rs_move_data(target, source, item_count,
323 info->opcode);
324 break;
325
186 case ACPI_RSC_SET8: 326 case ACPI_RSC_SET8:
187 327
188 ACPI_MEMSET(destination, info->aml_offset, info->value); 328 ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -219,13 +359,18 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
219 * Optional resource_source (Index and String). This is the more 359 * Optional resource_source (Index and String). This is the more
220 * complicated case used by the Interrupt() macro 360 * complicated case used by the Interrupt() macro
221 */ 361 */
222 target = 362 target = ACPI_ADD_PTR(char, resource,
223 ACPI_ADD_PTR(char, resource, 363 info->aml_offset +
224 info->aml_offset + (item_count * 4)); 364 (item_count * 4));
225 365
226 resource->length += 366 resource->length +=
227 acpi_rs_get_resource_source(aml_resource_length, 367 acpi_rs_get_resource_source(aml_resource_length,
228 (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target); 368 (acpi_rs_length)
369 (((item_count -
370 1) * sizeof(u32)) +
371 info->value),
372 destination, aml,
373 target);
229 break; 374 break;
230 375
231 case ACPI_RSC_BITMASK: 376 case ACPI_RSC_BITMASK:
@@ -327,6 +472,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
327{ 472{
328 void *source = NULL; 473 void *source = NULL;
329 void *destination; 474 void *destination;
475 char *target;
330 acpi_rsdesc_size aml_length = 0; 476 acpi_rsdesc_size aml_length = 0;
331 u8 count; 477 u8 count;
332 u16 temp16 = 0; 478 u16 temp16 = 0;
@@ -334,6 +480,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
334 480
335 ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml); 481 ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
336 482
483 if (!info) {
484 return_ACPI_STATUS(AE_BAD_PARAMETER);
485 }
486
337 /* 487 /*
338 * First table entry must be ACPI_RSC_INITxxx and must contain the 488 * First table entry must be ACPI_RSC_INITxxx and must contain the
339 * table length (# of table entries) 489 * table length (# of table entries)
@@ -383,6 +533,14 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
383 ((ACPI_GET8(source) & 0x03) << info->value); 533 ((ACPI_GET8(source) & 0x03) << info->value);
384 break; 534 break;
385 535
536 case ACPI_RSC_3BITFLAG:
537 /*
538 * Mask and shift the flag bits
539 */
540 ACPI_SET8(destination) |= (u8)
541 ((ACPI_GET8(source) & 0x07) << info->value);
542 break;
543
386 case ACPI_RSC_COUNT: 544 case ACPI_RSC_COUNT:
387 545
388 item_count = ACPI_GET8(source); 546 item_count = ACPI_GET8(source);
@@ -400,6 +558,63 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
400 acpi_rs_set_resource_length(aml_length, aml); 558 acpi_rs_set_resource_length(aml_length, aml);
401 break; 559 break;
402 560
561 case ACPI_RSC_COUNT_GPIO_PIN:
562
563 item_count = ACPI_GET16(source);
564 ACPI_SET16(destination) = (u16)aml_length;
565
566 aml_length = (u16)(aml_length + item_count * 2);
567 target = ACPI_ADD_PTR(void, aml, info->value);
568 ACPI_SET16(target) = (u16)aml_length;
569 acpi_rs_set_resource_length(aml_length, aml);
570 break;
571
572 case ACPI_RSC_COUNT_GPIO_VEN:
573
574 item_count = ACPI_GET16(source);
575 ACPI_SET16(destination) = (u16)item_count;
576
577 aml_length =
578 (u16)(aml_length + (info->value * item_count));
579 acpi_rs_set_resource_length(aml_length, aml);
580 break;
581
582 case ACPI_RSC_COUNT_GPIO_RES:
583
584 /* Set resource source string length */
585
586 item_count = ACPI_GET16(source);
587 ACPI_SET16(destination) = (u16)aml_length;
588
589 /* Compute offset for the Vendor Data */
590
591 aml_length = (u16)(aml_length + item_count);
592 target = ACPI_ADD_PTR(void, aml, info->value);
593
594 /* Set vendor offset only if there is vendor data */
595
596 if (resource->data.gpio.vendor_length) {
597 ACPI_SET16(target) = (u16)aml_length;
598 }
599
600 acpi_rs_set_resource_length(aml_length, aml);
601 break;
602
603 case ACPI_RSC_COUNT_SERIAL_VEN:
604
605 item_count = ACPI_GET16(source);
606 ACPI_SET16(destination) = item_count + info->value;
607 aml_length = (u16)(aml_length + item_count);
608 acpi_rs_set_resource_length(aml_length, aml);
609 break;
610
611 case ACPI_RSC_COUNT_SERIAL_RES:
612
613 item_count = ACPI_GET16(source);
614 aml_length = (u16)(aml_length + item_count);
615 acpi_rs_set_resource_length(aml_length, aml);
616 break;
617
403 case ACPI_RSC_LENGTH: 618 case ACPI_RSC_LENGTH:
404 619
405 acpi_rs_set_resource_length(info->value, aml); 620 acpi_rs_set_resource_length(info->value, aml);
@@ -417,6 +632,48 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
417 info->opcode); 632 info->opcode);
418 break; 633 break;
419 634
635 case ACPI_RSC_MOVE_GPIO_PIN:
636
637 destination = (char *)ACPI_ADD_PTR(void, aml,
638 ACPI_GET16
639 (destination));
640 source = *(u16 **)source;
641 acpi_rs_move_data(destination, source, item_count,
642 info->opcode);
643 break;
644
645 case ACPI_RSC_MOVE_GPIO_RES:
646
647 /* Used for both resource_source string and vendor_data */
648
649 destination = (char *)ACPI_ADD_PTR(void, aml,
650 ACPI_GET16
651 (destination));
652 source = *(u8 **)source;
653 acpi_rs_move_data(destination, source, item_count,
654 info->opcode);
655 break;
656
657 case ACPI_RSC_MOVE_SERIAL_VEN:
658
659 destination = (char *)ACPI_ADD_PTR(void, aml,
660 (aml_length -
661 item_count));
662 source = *(u8 **)source;
663 acpi_rs_move_data(destination, source, item_count,
664 info->opcode);
665 break;
666
667 case ACPI_RSC_MOVE_SERIAL_RES:
668
669 destination = (char *)ACPI_ADD_PTR(void, aml,
670 (aml_length -
671 item_count));
672 source = *(u8 **)source;
673 acpi_rs_move_data(destination, source, item_count,
674 info->opcode);
675 break;
676
420 case ACPI_RSC_ADDRESS: 677 case ACPI_RSC_ADDRESS:
421 678
422 /* Set the Resource Type, General Flags, and Type-Specific Flags */ 679 /* Set the Resource Type, General Flags, and Type-Specific Flags */
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
new file mode 100644
index 000000000000..9aa5e689b444
--- /dev/null
+++ b/drivers/acpi/acpica/rsserial.c
@@ -0,0 +1,441 @@
1/*******************************************************************************
2 *
3 * Module Name: rsserial - GPIO/serial_bus resource descriptors
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acresrc.h"
47
48#define _COMPONENT ACPI_RESOURCES
49ACPI_MODULE_NAME("rsserial")
50
51/*******************************************************************************
52 *
53 * acpi_rs_convert_gpio
54 *
55 ******************************************************************************/
56struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
57 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
58 ACPI_RS_SIZE(struct acpi_resource_gpio),
59 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
60
61 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
62 sizeof(struct aml_resource_gpio),
63 0},
64
65 /*
66 * These fields are contiguous in both the source and destination:
67 * revision_id
68 * connection_type
69 */
70 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
71 AML_OFFSET(gpio.revision_id),
72 2},
73
74 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
75 AML_OFFSET(gpio.flags),
76 0},
77
78 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
79 AML_OFFSET(gpio.int_flags),
80 3},
81
82 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
83 AML_OFFSET(gpio.int_flags),
84 0},
85
86 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
87 AML_OFFSET(gpio.int_flags),
88 0},
89
90 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
91 AML_OFFSET(gpio.int_flags),
92 1},
93
94 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
95 AML_OFFSET(gpio.pin_config),
96 1},
97
98 /*
99 * These fields are contiguous in both the source and destination:
100 * drive_strength
101 * debounce_timeout
102 */
103 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
104 AML_OFFSET(gpio.drive_strength),
105 2},
106
107 /* Pin Table */
108
109 {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
110 AML_OFFSET(gpio.pin_table_offset),
111 AML_OFFSET(gpio.res_source_offset)},
112
113 {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
114 AML_OFFSET(gpio.pin_table_offset),
115 0},
116
117 /* Resource Source */
118
119 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
120 AML_OFFSET(gpio.res_source_index),
121 1},
122
123 {ACPI_RSC_COUNT_GPIO_RES,
124 ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
125 AML_OFFSET(gpio.res_source_offset),
126 AML_OFFSET(gpio.vendor_offset)},
127
128 {ACPI_RSC_MOVE_GPIO_RES,
129 ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
130 AML_OFFSET(gpio.res_source_offset),
131 0},
132
133 /* Vendor Data */
134
135 {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
136 AML_OFFSET(gpio.vendor_length),
137 1},
138
139 {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
140 AML_OFFSET(gpio.vendor_offset),
141 0},
142};
143
144/*******************************************************************************
145 *
146 * acpi_rs_convert_i2c_serial_bus
147 *
148 ******************************************************************************/
149
150struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
151 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
152 ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
153 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
154
155 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
156 sizeof(struct aml_resource_i2c_serialbus),
157 0},
158
159 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
160 AML_OFFSET(common_serial_bus.revision_id),
161 1},
162
163 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
164 AML_OFFSET(common_serial_bus.type),
165 1},
166
167 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
168 AML_OFFSET(common_serial_bus.flags),
169 0},
170
171 {ACPI_RSC_1BITFLAG,
172 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
173 AML_OFFSET(common_serial_bus.flags),
174 1},
175
176 {ACPI_RSC_MOVE8,
177 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
178 AML_OFFSET(common_serial_bus.type_revision_id),
179 1},
180
181 {ACPI_RSC_MOVE16,
182 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
183 AML_OFFSET(common_serial_bus.type_data_length),
184 1},
185
186 /* Vendor data */
187
188 {ACPI_RSC_COUNT_SERIAL_VEN,
189 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
190 AML_OFFSET(common_serial_bus.type_data_length),
191 AML_RESOURCE_I2C_MIN_DATA_LEN},
192
193 {ACPI_RSC_MOVE_SERIAL_VEN,
194 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
195 0,
196 sizeof(struct aml_resource_i2c_serialbus)},
197
198 /* Resource Source */
199
200 {ACPI_RSC_MOVE8,
201 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
202 AML_OFFSET(common_serial_bus.res_source_index),
203 1},
204
205 {ACPI_RSC_COUNT_SERIAL_RES,
206 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
207 AML_OFFSET(common_serial_bus.type_data_length),
208 sizeof(struct aml_resource_common_serialbus)},
209
210 {ACPI_RSC_MOVE_SERIAL_RES,
211 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
212 AML_OFFSET(common_serial_bus.type_data_length),
213 sizeof(struct aml_resource_common_serialbus)},
214
215 /* I2C bus type specific */
216
217 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
218 AML_OFFSET(i2c_serial_bus.type_specific_flags),
219 0},
220
221 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
222 AML_OFFSET(i2c_serial_bus.connection_speed),
223 1},
224
225 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
226 AML_OFFSET(i2c_serial_bus.slave_address),
227 1},
228};
229
230/*******************************************************************************
231 *
232 * acpi_rs_convert_spi_serial_bus
233 *
234 ******************************************************************************/
235
236struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
237 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
238 ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
239 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
240
241 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
242 sizeof(struct aml_resource_spi_serialbus),
243 0},
244
245 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
246 AML_OFFSET(common_serial_bus.revision_id),
247 1},
248
249 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
250 AML_OFFSET(common_serial_bus.type),
251 1},
252
253 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
254 AML_OFFSET(common_serial_bus.flags),
255 0},
256
257 {ACPI_RSC_1BITFLAG,
258 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
259 AML_OFFSET(common_serial_bus.flags),
260 1},
261
262 {ACPI_RSC_MOVE8,
263 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
264 AML_OFFSET(common_serial_bus.type_revision_id),
265 1},
266
267 {ACPI_RSC_MOVE16,
268 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
269 AML_OFFSET(common_serial_bus.type_data_length),
270 1},
271
272 /* Vendor data */
273
274 {ACPI_RSC_COUNT_SERIAL_VEN,
275 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
276 AML_OFFSET(common_serial_bus.type_data_length),
277 AML_RESOURCE_SPI_MIN_DATA_LEN},
278
279 {ACPI_RSC_MOVE_SERIAL_VEN,
280 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
281 0,
282 sizeof(struct aml_resource_spi_serialbus)},
283
284 /* Resource Source */
285
286 {ACPI_RSC_MOVE8,
287 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
288 AML_OFFSET(common_serial_bus.res_source_index),
289 1},
290
291 {ACPI_RSC_COUNT_SERIAL_RES,
292 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
293 AML_OFFSET(common_serial_bus.type_data_length),
294 sizeof(struct aml_resource_common_serialbus)},
295
296 {ACPI_RSC_MOVE_SERIAL_RES,
297 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
298 AML_OFFSET(common_serial_bus.type_data_length),
299 sizeof(struct aml_resource_common_serialbus)},
300
301 /* Spi bus type specific */
302
303 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
304 AML_OFFSET(spi_serial_bus.type_specific_flags),
305 0},
306
307 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
308 AML_OFFSET(spi_serial_bus.type_specific_flags),
309 1},
310
311 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
312 AML_OFFSET(spi_serial_bus.data_bit_length),
313 1},
314
315 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
316 AML_OFFSET(spi_serial_bus.clock_phase),
317 1},
318
319 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
320 AML_OFFSET(spi_serial_bus.clock_polarity),
321 1},
322
323 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
324 AML_OFFSET(spi_serial_bus.device_selection),
325 1},
326
327 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
328 AML_OFFSET(spi_serial_bus.connection_speed),
329 1},
330};
331
332/*******************************************************************************
333 *
334 * acpi_rs_convert_uart_serial_bus
335 *
336 ******************************************************************************/
337
338struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
339 {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
340 ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
341 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
342
343 {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
344 sizeof(struct aml_resource_uart_serialbus),
345 0},
346
347 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
348 AML_OFFSET(common_serial_bus.revision_id),
349 1},
350
351 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
352 AML_OFFSET(common_serial_bus.type),
353 1},
354
355 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
356 AML_OFFSET(common_serial_bus.flags),
357 0},
358
359 {ACPI_RSC_1BITFLAG,
360 ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
361 AML_OFFSET(common_serial_bus.flags),
362 1},
363
364 {ACPI_RSC_MOVE8,
365 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
366 AML_OFFSET(common_serial_bus.type_revision_id),
367 1},
368
369 {ACPI_RSC_MOVE16,
370 ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
371 AML_OFFSET(common_serial_bus.type_data_length),
372 1},
373
374 /* Vendor data */
375
376 {ACPI_RSC_COUNT_SERIAL_VEN,
377 ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
378 AML_OFFSET(common_serial_bus.type_data_length),
379 AML_RESOURCE_UART_MIN_DATA_LEN},
380
381 {ACPI_RSC_MOVE_SERIAL_VEN,
382 ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
383 0,
384 sizeof(struct aml_resource_uart_serialbus)},
385
386 /* Resource Source */
387
388 {ACPI_RSC_MOVE8,
389 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
390 AML_OFFSET(common_serial_bus.res_source_index),
391 1},
392
393 {ACPI_RSC_COUNT_SERIAL_RES,
394 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
395 AML_OFFSET(common_serial_bus.type_data_length),
396 sizeof(struct aml_resource_common_serialbus)},
397
398 {ACPI_RSC_MOVE_SERIAL_RES,
399 ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
400 AML_OFFSET(common_serial_bus.type_data_length),
401 sizeof(struct aml_resource_common_serialbus)},
402
403 /* Uart bus type specific */
404
405 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
406 AML_OFFSET(uart_serial_bus.type_specific_flags),
407 0},
408
409 {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
410 AML_OFFSET(uart_serial_bus.type_specific_flags),
411 2},
412
413 {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
414 AML_OFFSET(uart_serial_bus.type_specific_flags),
415 4},
416
417 {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
418 AML_OFFSET(uart_serial_bus.type_specific_flags),
419 7},
420
421 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
422 AML_OFFSET(uart_serial_bus.parity),
423 1},
424
425 {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
426 AML_OFFSET(uart_serial_bus.lines_enabled),
427 1},
428
429 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
430 AML_OFFSET(uart_serial_bus.rx_fifo_size),
431 1},
432
433 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
434 AML_OFFSET(uart_serial_bus.tx_fifo_size),
435 1},
436
437 {ACPI_RSC_MOVE32,
438 ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
439 AML_OFFSET(uart_serial_bus.default_baud_rate),
440 1},
441};
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 231811e56939..433a375deb93 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,9 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
144 * since there are no alignment or endian issues 144 * since there are no alignment or endian issues
145 */ 145 */
146 case ACPI_RSC_MOVE8: 146 case ACPI_RSC_MOVE8:
147 case ACPI_RSC_MOVE_GPIO_RES:
148 case ACPI_RSC_MOVE_SERIAL_VEN:
149 case ACPI_RSC_MOVE_SERIAL_RES:
147 ACPI_MEMCPY(destination, source, item_count); 150 ACPI_MEMCPY(destination, source, item_count);
148 return; 151 return;
149 152
@@ -153,6 +156,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
153 * misaligned memory transfers 156 * misaligned memory transfers
154 */ 157 */
155 case ACPI_RSC_MOVE16: 158 case ACPI_RSC_MOVE16:
159 case ACPI_RSC_MOVE_GPIO_PIN:
156 ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i], 160 ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
157 &ACPI_CAST_PTR(u16, source)[i]); 161 &ACPI_CAST_PTR(u16, source)[i]);
158 break; 162 break;
@@ -590,6 +594,56 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
590 594
591/******************************************************************************* 595/*******************************************************************************
592 * 596 *
597 * FUNCTION: acpi_rs_get_aei_method_data
598 *
599 * PARAMETERS: Node - Device node
600 * ret_buffer - Pointer to a buffer structure for the
601 * results
602 *
603 * RETURN: Status
604 *
605 * DESCRIPTION: This function is called to get the _AEI value of the object
606 * specified by the handle passed in
607 *
608 * If the function fails, an appropriate status will be returned
609 * and the contents of the caller's buffer are undefined.
610 *
611 ******************************************************************************/
612
613acpi_status
614acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
615 struct acpi_buffer *ret_buffer)
616{
617 union acpi_operand_object *obj_desc;
618 acpi_status status;
619
620 ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
621
622 /* Parameters guaranteed valid by caller */
623
624 /* Execute the method, no parameters */
625
626 status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
627 ACPI_BTYPE_BUFFER, &obj_desc);
628 if (ACPI_FAILURE(status)) {
629 return_ACPI_STATUS(status);
630 }
631
632 /*
633 * Make the call to create a resource linked list from the
634 * byte stream buffer that comes back from the _AEI method
635 * execution.
636 */
637 status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
638
639 /* On exit, we must delete the object returned by evaluate_object */
640
641 acpi_ut_remove_reference(obj_desc);
642 return_ACPI_STATUS(status);
643}
644
645/*******************************************************************************
646 *
593 * FUNCTION: acpi_rs_get_method_data 647 * FUNCTION: acpi_rs_get_method_data
594 * 648 *
595 * PARAMETERS: Handle - Handle to the containing object 649 * PARAMETERS: Handle - Handle to the containing object
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index fe86b37b16ce..f58c098c7aeb 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -307,6 +307,46 @@ acpi_set_current_resources(acpi_handle device_handle,
307 307
308ACPI_EXPORT_SYMBOL(acpi_set_current_resources) 308ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
309 309
310/*******************************************************************************
311 *
312 * FUNCTION: acpi_get_event_resources
313 *
314 * PARAMETERS: device_handle - Handle to the device object for the
315 * device we are getting resources for
316 * ret_buffer - Pointer to a buffer in which the event
317 * resources for the device are returned
318 *
319 * RETURN: Status
320 *
321 * DESCRIPTION: This function is called to get the event resources for a
322 * specific device. The caller must first acquire a handle for
323 * the desired device. The resource data is returned in the
324 * buffer pointed to by the ret_buffer variable. Uses the
325 * _AEI method.
326 *
327 ******************************************************************************/
328acpi_status
329acpi_get_event_resources(acpi_handle device_handle,
330 struct acpi_buffer *ret_buffer)
331{
332 acpi_status status;
333 struct acpi_namespace_node *node;
334
335 ACPI_FUNCTION_TRACE(acpi_get_event_resources);
336
337 /* Validate parameters then dispatch to internal routine */
338
339 status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
340 if (ACPI_FAILURE(status)) {
341 return_ACPI_STATUS(status);
342 }
343
344 status = acpi_rs_get_aei_method_data(node, ret_buffer);
345 return_ACPI_STATUS(status);
346}
347
348ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
349
310/****************************************************************************** 350/******************************************************************************
311 * 351 *
312 * FUNCTION: acpi_resource_to_address64 352 * FUNCTION: acpi_resource_to_address64
@@ -486,8 +526,9 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
486 * 526 *
487 * PARAMETERS: device_handle - Handle to the device object for the 527 * PARAMETERS: device_handle - Handle to the device object for the
488 * device we are querying 528 * device we are querying
489 * Name - Method name of the resources we want 529 * Name - Method name of the resources we want.
490 * (METHOD_NAME__CRS or METHOD_NAME__PRS) 530 * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
531 * METHOD_NAME__AEI)
491 * user_function - Called for each resource 532 * user_function - Called for each resource
492 * Context - Passed to user_function 533 * Context - Passed to user_function
493 * 534 *
@@ -514,11 +555,12 @@ acpi_walk_resources(acpi_handle device_handle,
514 555
515 if (!device_handle || !user_function || !name || 556 if (!device_handle || !user_function || !name ||
516 (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) && 557 (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
517 !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) { 558 !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
559 !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
518 return_ACPI_STATUS(AE_BAD_PARAMETER); 560 return_ACPI_STATUS(AE_BAD_PARAMETER);
519 } 561 }
520 562
521 /* Get the _CRS or _PRS resource list */ 563 /* Get the _CRS/_PRS/_AEI resource list */
522 564
523 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; 565 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
524 status = acpi_rs_get_method_data(device_handle, name, &buffer); 566 status = acpi_rs_get_method_data(device_handle, name, &buffer);
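Together with the rsutils.c addition, these rsxface.c changes expose _AEI through the same two shapes already used for _CRS and _PRS: a one-shot buffer interface (acpi_get_event_resources) and the per-descriptor walker (acpi_walk_resources with METHOD_NAME__AEI). A hedged usage sketch from a hypothetical caller; the callback name and the handling inside it are illustrative and not part of this patch:

#include <acpi/acpi.h>

/* Illustrative callback: invoked once per descriptor in the _AEI list */
static acpi_status show_aei_resource(struct acpi_resource *res, void *context)
{
	if (res->type == ACPI_RESOURCE_TYPE_GPIO)
		acpi_os_printf("GPIO event resource with %u pin(s)\n",
			       res->data.gpio.pin_table_length);
	return AE_OK;
}

static acpi_status dump_aei(acpi_handle device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_LOCAL_BUFFER, NULL };
	acpi_status status;

	/* One-shot form: the whole _AEI resource list lands in buffer.pointer */
	status = acpi_get_event_resources(device, &buffer);
	if (ACPI_SUCCESS(status))
		ACPI_FREE(buffer.pointer);

	/* Walker form: one callback per resource descriptor */
	return acpi_walk_resources(device, METHOD_NAME__AEI,
				   show_aei_resource, NULL);
}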
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6f5588e62c0a..c5d870406f41 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,15 @@ static void acpi_tb_setup_fadt_registers(void);
63 63
64typedef struct acpi_fadt_info { 64typedef struct acpi_fadt_info {
65 char *name; 65 char *name;
66 u8 address64; 66 u16 address64;
67 u8 address32; 67 u16 address32;
68 u8 length; 68 u16 length;
69 u8 default_length; 69 u8 default_length;
70 u8 type; 70 u8 type;
71 71
72} acpi_fadt_info; 72} acpi_fadt_info;
73 73
74#define ACPI_FADT_OPTIONAL 0
74#define ACPI_FADT_REQUIRED 1 75#define ACPI_FADT_REQUIRED 1
75#define ACPI_FADT_SEPARATE_LENGTH 2 76#define ACPI_FADT_SEPARATE_LENGTH 2
76 77
@@ -87,7 +88,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
87 ACPI_FADT_OFFSET(pm1b_event_block), 88 ACPI_FADT_OFFSET(pm1b_event_block),
88 ACPI_FADT_OFFSET(pm1_event_length), 89 ACPI_FADT_OFFSET(pm1_event_length),
89 ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */ 90 ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
90 0}, 91 ACPI_FADT_OPTIONAL},
91 92
92 {"Pm1aControlBlock", 93 {"Pm1aControlBlock",
93 ACPI_FADT_OFFSET(xpm1a_control_block), 94 ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -101,7 +102,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
101 ACPI_FADT_OFFSET(pm1b_control_block), 102 ACPI_FADT_OFFSET(pm1b_control_block),
102 ACPI_FADT_OFFSET(pm1_control_length), 103 ACPI_FADT_OFFSET(pm1_control_length),
103 ACPI_PM1_REGISTER_WIDTH, 104 ACPI_PM1_REGISTER_WIDTH,
104 0}, 105 ACPI_FADT_OPTIONAL},
105 106
106 {"Pm2ControlBlock", 107 {"Pm2ControlBlock",
107 ACPI_FADT_OFFSET(xpm2_control_block), 108 ACPI_FADT_OFFSET(xpm2_control_block),
@@ -139,7 +140,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
139 140
140typedef struct acpi_fadt_pm_info { 141typedef struct acpi_fadt_pm_info {
141 struct acpi_generic_address *target; 142 struct acpi_generic_address *target;
142 u8 source; 143 u16 source;
143 u8 register_num; 144 u8 register_num;
144 145
145} acpi_fadt_pm_info; 146} acpi_fadt_pm_info;
@@ -253,8 +254,13 @@ void acpi_tb_parse_fadt(u32 table_index)
253 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, 254 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
254 ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); 255 ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
255 256
256 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs, 257 /* If Hardware Reduced flag is set, there is no FACS */
257 ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS); 258
259 if (!acpi_gbl_reduced_hardware) {
260 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
261 Xfacs, ACPI_SIG_FACS,
262 ACPI_TABLE_INDEX_FACS);
263 }
258} 264}
259 265
260/******************************************************************************* 266/*******************************************************************************
@@ -277,12 +283,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
277{ 283{
278 /* 284 /*
279 * Check if the FADT is larger than the largest table that we expect 285 * Check if the FADT is larger than the largest table that we expect
280 * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue 286 * (the ACPI 5.0 version). If so, truncate the table, and issue
281 * a warning. 287 * a warning.
282 */ 288 */
283 if (length > sizeof(struct acpi_table_fadt)) { 289 if (length > sizeof(struct acpi_table_fadt)) {
284 ACPI_WARNING((AE_INFO, 290 ACPI_WARNING((AE_INFO,
285 "FADT (revision %u) is longer than ACPI 2.0 version, " 291 "FADT (revision %u) is longer than ACPI 5.0 version, "
286 "truncating length %u to %u", 292 "truncating length %u to %u",
287 table->revision, length, 293 table->revision, length,
288 (u32)sizeof(struct acpi_table_fadt))); 294 (u32)sizeof(struct acpi_table_fadt)));
@@ -297,6 +303,13 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
297 ACPI_MEMCPY(&acpi_gbl_FADT, table, 303 ACPI_MEMCPY(&acpi_gbl_FADT, table,
298 ACPI_MIN(length, sizeof(struct acpi_table_fadt))); 304 ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
299 305
306 /* Take a copy of the Hardware Reduced flag */
307
308 acpi_gbl_reduced_hardware = FALSE;
309 if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
310 acpi_gbl_reduced_hardware = TRUE;
311 }
312
300 /* Convert the local copy of the FADT to the common internal format */ 313 /* Convert the local copy of the FADT to the common internal format */
301 314
302 acpi_tb_convert_fadt(); 315 acpi_tb_convert_fadt();
@@ -502,6 +515,12 @@ static void acpi_tb_validate_fadt(void)
502 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; 515 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
503 } 516 }
504 517
518 /* If Hardware Reduced flag is set, we are all done */
519
520 if (acpi_gbl_reduced_hardware) {
521 return;
522 }
523
505 /* Examine all of the 64-bit extended address fields (X fields) */ 524 /* Examine all of the 64-bit extended address fields (X fields) */
506 525
507 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { 526 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
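
The FADT conversion now latches a global hardware-reduced flag and, when it is set, skips FACS installation and the 64-bit extended-address checks. The decision is a single bit in the FADT flags field; an illustrative helper (assuming only the ACPICA type and flag names used above, not code from the patch) would look like:

/* Illustration only: the test recorded in acpi_gbl_reduced_hardware */
static u8 fadt_is_hw_reduced(const struct acpi_table_fadt *fadt)
{
        /* ACPI 5.0: no FACS and no fixed hardware when this bit is set */
        return (fadt->flags & ACPI_FADT_HW_REDUCED) ? TRUE : FALSE;
}
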
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index a55cb2bb5abb..4903e36ea75a 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 62365f6075dd..1aecf7baa4e0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0f2d395feaba..09ca39e14337 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -135,6 +135,13 @@ acpi_status acpi_tb_initialize_facs(void)
135{ 135{
136 acpi_status status; 136 acpi_status status;
137 137
138 /* If Hardware Reduced flag is set, there is no FACS */
139
140 if (acpi_gbl_reduced_hardware) {
141 acpi_gbl_FACS = NULL;
142 return (AE_OK);
143 }
144
138 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, 145 status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
139 ACPI_CAST_INDIRECT_PTR(struct 146 ACPI_CAST_INDIRECT_PTR(struct
140 acpi_table_header, 147 acpi_table_header,
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index e7d13f5d3f2d..abcc6412c244 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2011, Intel Corp. 9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7eb6c6cc1edf..4258f647ca3d 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
new file mode 100644
index 000000000000..67932aebe6dd
--- /dev/null
+++ b/drivers/acpi/acpica/utaddress.c
@@ -0,0 +1,294 @@
1/******************************************************************************
2 *
3 * Module Name: utaddress - op_region address range check
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utaddress")
50
51/*******************************************************************************
52 *
53 * FUNCTION: acpi_ut_add_address_range
54 *
55 * PARAMETERS: space_id - Address space ID
56 * Address - op_region start address
57 * Length - op_region length
58 * region_node - op_region namespace node
59 *
60 * RETURN: Status
61 *
62 * DESCRIPTION: Add the Operation Region address range to the global list.
63 * The only supported Space IDs are Memory and I/O. Called when
64 * the op_region address/length operands are fully evaluated.
65 *
66 * MUTEX: Locks the namespace
67 *
68 * NOTE: Because this interface is only called when an op_region argument
69 * list is evaluated, there cannot be any duplicate region_nodes.
70 * Duplicate Address/Length values are allowed, however, so that multiple
71 * address conflicts can be detected.
72 *
73 ******************************************************************************/
74acpi_status
75acpi_ut_add_address_range(acpi_adr_space_type space_id,
76 acpi_physical_address address,
77 u32 length, struct acpi_namespace_node *region_node)
78{
79 struct acpi_address_range *range_info;
80 acpi_status status;
81
82 ACPI_FUNCTION_TRACE(ut_add_address_range);
83
84 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
85 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
86 return_ACPI_STATUS(AE_OK);
87 }
88
89 /* Allocate/init a new info block, add it to the appropriate list */
90
91 range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
92 if (!range_info) {
93 return_ACPI_STATUS(AE_NO_MEMORY);
94 }
95
96 range_info->start_address = address;
97 range_info->end_address = (address + length - 1);
98 range_info->region_node = region_node;
99
100 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
101 if (ACPI_FAILURE(status)) {
102 ACPI_FREE(range_info);
103 return_ACPI_STATUS(status);
104 }
105
106 range_info->next = acpi_gbl_address_range_list[space_id];
107 acpi_gbl_address_range_list[space_id] = range_info;
108
109 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
110 "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
111 acpi_ut_get_node_name(range_info->region_node),
112 ACPI_CAST_PTR(void, address),
113 ACPI_CAST_PTR(void, range_info->end_address)));
114
115 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
116 return_ACPI_STATUS(AE_OK);
117}
118
119/*******************************************************************************
120 *
121 * FUNCTION: acpi_ut_remove_address_range
122 *
123 * PARAMETERS: space_id - Address space ID
124 * region_node - op_region namespace node
125 *
126 * RETURN: None
127 *
128 * DESCRIPTION: Remove the Operation Region from the global list. The only
129 * supported Space IDs are Memory and I/O. Called when an
130 * op_region is deleted.
131 *
132 * MUTEX: Assumes the namespace is locked
133 *
134 ******************************************************************************/
135
136void
137acpi_ut_remove_address_range(acpi_adr_space_type space_id,
138 struct acpi_namespace_node *region_node)
139{
140 struct acpi_address_range *range_info;
141 struct acpi_address_range *prev;
142
143 ACPI_FUNCTION_TRACE(ut_remove_address_range);
144
145 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
146 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
147 return_VOID;
148 }
149
150 /* Get the appropriate list head and check the list */
151
152 range_info = prev = acpi_gbl_address_range_list[space_id];
153 while (range_info) {
154 if (range_info->region_node == region_node) {
155 if (range_info == prev) { /* Found at list head */
156 acpi_gbl_address_range_list[space_id] =
157 range_info->next;
158 } else {
159 prev->next = range_info->next;
160 }
161
162 ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
163 "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
164 acpi_ut_get_node_name(range_info->
165 region_node),
166 ACPI_CAST_PTR(void,
167 range_info->
168 start_address),
169 ACPI_CAST_PTR(void,
170 range_info->
171 end_address)));
172
173 ACPI_FREE(range_info);
174 return_VOID;
175 }
176
177 prev = range_info;
178 range_info = range_info->next;
179 }
180
181 return_VOID;
182}
183
184/*******************************************************************************
185 *
186 * FUNCTION: acpi_ut_check_address_range
187 *
188 * PARAMETERS: space_id - Address space ID
189 * Address - Start address
190 * Length - Length of address range
191 * Warn - TRUE if warning on overlap desired
192 *
193 * RETURN: Count of the number of conflicts detected. Zero is always
194 * returned for Space IDs other than Memory or I/O.
195 *
196 * DESCRIPTION: Check if the input address range overlaps any of the
197 * ASL operation region address ranges. The only supported
198 * Space IDs are Memory and I/O.
199 *
200 * MUTEX: Assumes the namespace is locked.
201 *
202 ******************************************************************************/
203
204u32
205acpi_ut_check_address_range(acpi_adr_space_type space_id,
206 acpi_physical_address address, u32 length, u8 warn)
207{
208 struct acpi_address_range *range_info;
209 acpi_physical_address end_address;
210 char *pathname;
211 u32 overlap_count = 0;
212
213 ACPI_FUNCTION_TRACE(ut_check_address_range);
214
215 if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
216 (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
217 return_UINT32(0);
218 }
219
220 range_info = acpi_gbl_address_range_list[space_id];
221 end_address = address + length - 1;
222
223 /* Check entire list for all possible conflicts */
224
225 while (range_info) {
226 /*
227 * Check if the requested Address/Length overlaps this address_range.
228 * Four cases to consider:
229 *
230 * 1) Input address/length is contained completely in the address range
231 * 2) Input address/length overlaps range at the range start
232 * 3) Input address/length overlaps range at the range end
233 * 4) Input address/length completely encompasses the range
234 */
235 if ((address <= range_info->end_address) &&
236 (end_address >= range_info->start_address)) {
237
238 /* Found an address range overlap */
239
240 overlap_count++;
241 if (warn) { /* Optional warning message */
242 pathname =
243 acpi_ns_get_external_pathname(range_info->
244 region_node);
245
246 ACPI_WARNING((AE_INFO,
247 "0x%p-0x%p %s conflicts with Region %s %d",
248 ACPI_CAST_PTR(void, address),
249 ACPI_CAST_PTR(void, end_address),
250 acpi_ut_get_region_name(space_id),
251 pathname, overlap_count));
252 ACPI_FREE(pathname);
253 }
254 }
255
256 range_info = range_info->next;
257 }
258
259 return_UINT32(overlap_count);
260}
261
262/*******************************************************************************
263 *
264 * FUNCTION: acpi_ut_delete_address_lists
265 *
266 * PARAMETERS: None
267 *
268 * RETURN: None
269 *
270 * DESCRIPTION: Delete all global address range lists (called during
271 * subsystem shutdown).
272 *
273 ******************************************************************************/
274
275void acpi_ut_delete_address_lists(void)
276{
277 struct acpi_address_range *next;
278 struct acpi_address_range *range_info;
279 int i;
280
281 /* Delete all elements in all address range lists */
282
283 for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
284 next = acpi_gbl_address_range_list[i];
285
286 while (next) {
287 range_info = next;
288 next = range_info->next;
289 ACPI_FREE(range_info);
290 }
291
292 acpi_gbl_address_range_list[i] = NULL;
293 }
294}
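
The four overlap cases enumerated in acpi_ut_check_address_range() collapse into a single inclusive-range comparison. A standalone sketch (plain C, not ACPICA code) demonstrating the same test:

#include <stdio.h>

/* Two inclusive ranges overlap iff a.start <= b.end && a.end >= b.start */
static int ranges_overlap(unsigned long a_start, unsigned long a_len,
                          unsigned long b_start, unsigned long b_len)
{
        unsigned long a_end = a_start + a_len - 1;
        unsigned long b_end = b_start + b_len - 1;

        return (a_start <= b_end) && (a_end >= b_start);
}

int main(void)
{
        /* op_region at 0x1000-0x10FF vs. a mapping at 0x10F0-0x110F (invented) */
        printf("overlap: %d\n", ranges_overlap(0x1000, 0x100, 0x10F0, 0x20));
        return 0;
}
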
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 0a697351cf69..9982d2ea66fb 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index aded299a2fa8..3317c0a406ee 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a1f8d7509e66..a0998a886318 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 8b087e2d64f4..d42ede5260c7 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -171,7 +171,9 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
171 "SMBus", 171 "SMBus",
172 "SystemCMOS", 172 "SystemCMOS",
173 "PCIBARTarget", 173 "PCIBARTarget",
174 "IPMI" 174 "IPMI",
175 "GeneralPurposeIo",
176 "GenericSerialBus"
175}; 177};
176 178
177char *acpi_ut_get_region_name(u8 space_id) 179char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 31f5a7832ef1..2a6c3e183697 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -215,11 +215,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
215 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 215 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
216 "***** Region %p\n", object)); 216 "***** Region %p\n", object));
217 217
218 /* Invalidate the region address/length via the host OS */ 218 /*
219 219 * Update address_range list. However, only permanent regions
220 acpi_os_invalidate_address(object->region.space_id, 220 * are installed in this list. (Not created within a method)
221 object->region.address, 221 */
222 (acpi_size) object->region.length); 222 if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
223 acpi_ut_remove_address_range(object->region.space_id,
224 object->region.node);
225 }
223 226
224 second_desc = acpi_ns_get_secondary_object(object); 227 second_desc = acpi_ns_get_secondary_object(object);
225 if (second_desc) { 228 if (second_desc) {
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 18f73c9d10bc..479f32b33415 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffba0a39c3e8..4153584cf526 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -264,6 +264,12 @@ acpi_status acpi_ut_init_globals(void)
264 return_ACPI_STATUS(status); 264 return_ACPI_STATUS(status);
265 } 265 }
266 266
267 /* Address Range lists */
268
269 for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
270 acpi_gbl_address_range_list[i] = NULL;
271 }
272
267 /* Mutex locked flags */ 273 /* Mutex locked flags */
268 274
269 for (i = 0; i < ACPI_NUM_MUTEX; i++) { 275 for (i = 0; i < ACPI_NUM_MUTEX; i++) {
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index b679ea693545..c92eb1d93785 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 191b6828cce9..8359c0c5dc98 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@ static void acpi_ut_terminate(void)
92 gpe_xrupt_info = next_gpe_xrupt_info; 92 gpe_xrupt_info = next_gpe_xrupt_info;
93 } 93 }
94 94
95 acpi_ut_delete_address_lists();
95 return_VOID; 96 return_VOID;
96} 97}
97 98
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index f6bb75c6faf5..155fd786d0f2 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index ce481da9bb45..2491a552b0e6 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c33a852d4f42..86f19db74e05 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 7d797e2baecd..43174df33121 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -293,14 +293,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
293 293
294acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) 294acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
295{ 295{
296 acpi_thread_id this_thread_id;
297
298 ACPI_FUNCTION_NAME(ut_release_mutex); 296 ACPI_FUNCTION_NAME(ut_release_mutex);
299 297
300 this_thread_id = acpi_os_get_thread_id();
301
302 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n", 298 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
303 (u32)this_thread_id, 299 (u32)acpi_os_get_thread_id(),
304 acpi_ut_get_mutex_name(mutex_id))); 300 acpi_ut_get_mutex_name(mutex_id)));
305 301
306 if (mutex_id > ACPI_MAX_MUTEX) { 302 if (mutex_id > ACPI_MAX_MUTEX) {
@@ -329,7 +325,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
329 * the ACPI subsystem code. 325 * the ACPI subsystem code.
330 */ 326 */
331 for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { 327 for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
332 if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { 328 if (acpi_gbl_mutex_info[i].thread_id ==
329 acpi_os_get_thread_id()) {
333 if (i == mutex_id) { 330 if (i == mutex_id) {
334 continue; 331 continue;
335 } 332 }
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 188340a017b4..b112744fc9ae 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 1fb10cb8f11d..2360cf70c18c 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 6ffd3a8bdaa5..9d441ea70305 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "amlresrc.h" 46#include "acresrc.h"
47 47
48#define _COMPONENT ACPI_UTILITIES 48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utresrc") 49ACPI_MODULE_NAME("utresrc")
@@ -154,6 +154,138 @@ const char *acpi_gbl_typ_decode[] = {
154 "TypeF" 154 "TypeF"
155}; 155};
156 156
157const char *acpi_gbl_ppc_decode[] = {
158 "PullDefault",
159 "PullUp",
160 "PullDown",
161 "PullNone"
162};
163
164const char *acpi_gbl_ior_decode[] = {
165 "IoRestrictionNone",
166 "IoRestrictionInputOnly",
167 "IoRestrictionOutputOnly",
168 "IoRestrictionNoneAndPreserve"
169};
170
171const char *acpi_gbl_dts_decode[] = {
172 "Width8bit",
173 "Width16bit",
174 "Width32bit",
175 "Width64bit",
176 "Width128bit",
177 "Width256bit",
178};
179
180/* GPIO connection type */
181
182const char *acpi_gbl_ct_decode[] = {
183 "Interrupt",
184 "I/O"
185};
186
187/* Serial bus type */
188
189const char *acpi_gbl_sbt_decode[] = {
190 "/* UNKNOWN serial bus type */",
191 "I2C",
192 "SPI",
193 "UART"
194};
195
196/* I2C serial bus access mode */
197
198const char *acpi_gbl_am_decode[] = {
199 "AddressingMode7Bit",
200 "AddressingMode10Bit"
201};
202
203/* I2C serial bus slave mode */
204
205const char *acpi_gbl_sm_decode[] = {
206 "ControllerInitiated",
207 "DeviceInitiated"
208};
209
210/* SPI serial bus wire mode */
211
212const char *acpi_gbl_wm_decode[] = {
213 "FourWireMode",
214 "ThreeWireMode"
215};
216
217/* SPI serial clock phase */
218
219const char *acpi_gbl_cph_decode[] = {
220 "ClockPhaseFirst",
221 "ClockPhaseSecond"
222};
223
224/* SPI serial bus clock polarity */
225
226const char *acpi_gbl_cpo_decode[] = {
227 "ClockPolarityLow",
228 "ClockPolarityHigh"
229};
230
231/* SPI serial bus device polarity */
232
233const char *acpi_gbl_dp_decode[] = {
234 "PolarityLow",
235 "PolarityHigh"
236};
237
238/* UART serial bus endian */
239
240const char *acpi_gbl_ed_decode[] = {
241 "LittleEndian",
242 "BigEndian"
243};
244
245/* UART serial bus bits per byte */
246
247const char *acpi_gbl_bpb_decode[] = {
248 "DataBitsFive",
249 "DataBitsSix",
250 "DataBitsSeven",
251 "DataBitsEight",
252 "DataBitsNine",
253 "/* UNKNOWN Bits per byte */",
254 "/* UNKNOWN Bits per byte */",
255 "/* UNKNOWN Bits per byte */"
256};
257
258/* UART serial bus stop bits */
259
260const char *acpi_gbl_sb_decode[] = {
261 "StopBitsNone",
262 "StopBitsOne",
263 "StopBitsOnePlusHalf",
264 "StopBitsTwo"
265};
266
267/* UART serial bus flow control */
268
269const char *acpi_gbl_fc_decode[] = {
270 "FlowControlNone",
271 "FlowControlHardware",
272 "FlowControlXON",
273 "/* UNKNOWN flow control keyword */"
274};
275
276/* UART serial bus parity type */
277
278const char *acpi_gbl_pt_decode[] = {
279 "ParityTypeNone",
280 "ParityTypeEven",
281 "ParityTypeOdd",
282 "ParityTypeMark",
283 "ParityTypeSpace",
284 "/* UNKNOWN parity keyword */",
285 "/* UNKNOWN parity keyword */",
286 "/* UNKNOWN parity keyword */"
287};
288
157#endif 289#endif
158 290
159/* 291/*
@@ -173,7 +305,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
173 ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent), 305 ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
174 ACPI_AML_SIZE_SMALL(struct aml_resource_io), 306 ACPI_AML_SIZE_SMALL(struct aml_resource_io),
175 ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io), 307 ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
176 0, 308 ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
177 0, 309 0,
178 0, 310 0,
179 0, 311 0,
@@ -193,7 +325,17 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
193 ACPI_AML_SIZE_LARGE(struct aml_resource_address16), 325 ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
194 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq), 326 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
195 ACPI_AML_SIZE_LARGE(struct aml_resource_address64), 327 ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
196 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64) 328 ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
329 ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
330 0,
331 ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
332};
333
334const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
335 0,
336 ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
337 ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
338 ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
197}; 339};
198 340
199/* 341/*
@@ -209,35 +351,49 @@ static const u8 acpi_gbl_resource_types[] = {
209 0, 351 0,
210 0, 352 0,
211 0, 353 0,
212 ACPI_SMALL_VARIABLE_LENGTH, 354 ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */
213 ACPI_FIXED_LENGTH, 355 ACPI_FIXED_LENGTH, /* 05 DMA */
214 ACPI_SMALL_VARIABLE_LENGTH, 356 ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
215 ACPI_FIXED_LENGTH, 357 ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
216 ACPI_FIXED_LENGTH, 358 ACPI_FIXED_LENGTH, /* 08 IO */
217 ACPI_FIXED_LENGTH, 359 ACPI_FIXED_LENGTH, /* 09 fixed_iO */
218 0, 360 ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */
219 0, 361 0,
220 0, 362 0,
221 0, 363 0,
222 ACPI_VARIABLE_LENGTH, 364 ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */
223 ACPI_FIXED_LENGTH, 365 ACPI_FIXED_LENGTH, /* 0_f end_tag */
224 366
225 /* Large descriptors */ 367 /* Large descriptors */
226 368
227 0, 369 0,
228 ACPI_FIXED_LENGTH, 370 ACPI_FIXED_LENGTH, /* 01 Memory24 */
229 ACPI_FIXED_LENGTH, 371 ACPI_FIXED_LENGTH, /* 02 generic_register */
230 0, 372 0,
231 ACPI_VARIABLE_LENGTH, 373 ACPI_VARIABLE_LENGTH, /* 04 vendor_long */
232 ACPI_FIXED_LENGTH, 374 ACPI_FIXED_LENGTH, /* 05 Memory32 */
233 ACPI_FIXED_LENGTH, 375 ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
234 ACPI_VARIABLE_LENGTH, 376 ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
235 ACPI_VARIABLE_LENGTH, 377 ACPI_VARIABLE_LENGTH, /* 08 Word* address */
236 ACPI_VARIABLE_LENGTH, 378 ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */
237 ACPI_VARIABLE_LENGTH, 379 ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */
238 ACPI_FIXED_LENGTH 380 ACPI_FIXED_LENGTH, /* 0_b Extended* address */
381 ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */
382 0,
383 ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */
239}; 384};
240 385
386/*
387 * For the i_aSL compiler/disassembler, we don't want any error messages
388 * because the disassembler uses the resource validation code to determine
389 * if Buffer objects are actually Resource Templates.
390 */
391#ifdef ACPI_ASL_COMPILER
392#define ACPI_RESOURCE_ERROR(plist)
393#else
394#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
395#endif
396
241/******************************************************************************* 397/*******************************************************************************
242 * 398 *
243 * FUNCTION: acpi_ut_walk_aml_resources 399 * FUNCTION: acpi_ut_walk_aml_resources
@@ -265,6 +421,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
265 u8 resource_index; 421 u8 resource_index;
266 u32 length; 422 u32 length;
267 u32 offset = 0; 423 u32 offset = 0;
424 u8 end_tag[2] = { 0x79, 0x00 };
268 425
269 ACPI_FUNCTION_TRACE(ut_walk_aml_resources); 426 ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
270 427
@@ -286,6 +443,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
286 443
287 status = acpi_ut_validate_resource(aml, &resource_index); 444 status = acpi_ut_validate_resource(aml, &resource_index);
288 if (ACPI_FAILURE(status)) { 445 if (ACPI_FAILURE(status)) {
446 /*
447 * Exit on failure. Cannot continue because the descriptor length
448 * may be bogus also.
449 */
289 return_ACPI_STATUS(status); 450 return_ACPI_STATUS(status);
290 } 451 }
291 452
@@ -300,7 +461,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
300 user_function(aml, length, offset, resource_index, 461 user_function(aml, length, offset, resource_index,
301 context); 462 context);
302 if (ACPI_FAILURE(status)) { 463 if (ACPI_FAILURE(status)) {
303 return (status); 464 return_ACPI_STATUS(status);
304 } 465 }
305 } 466 }
306 467
@@ -333,7 +494,19 @@ acpi_ut_walk_aml_resources(u8 * aml,
333 494
334 /* Did not find an end_tag descriptor */ 495 /* Did not find an end_tag descriptor */
335 496
336 return (AE_AML_NO_RESOURCE_END_TAG); 497 if (user_function) {
498
499 /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
500
501 (void)acpi_ut_validate_resource(end_tag, &resource_index);
502 status =
503 user_function(end_tag, 2, offset, resource_index, context);
504 if (ACPI_FAILURE(status)) {
505 return_ACPI_STATUS(status);
506 }
507 }
508
509 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
337} 510}
338 511
339/******************************************************************************* 512/*******************************************************************************
@@ -354,6 +527,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
354 527
355acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) 528acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
356{ 529{
530 union aml_resource *aml_resource;
357 u8 resource_type; 531 u8 resource_type;
358 u8 resource_index; 532 u8 resource_index;
359 acpi_rs_length resource_length; 533 acpi_rs_length resource_length;
@@ -375,7 +549,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
375 /* Verify the large resource type (name) against the max */ 549 /* Verify the large resource type (name) against the max */
376 550
377 if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) { 551 if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
378 return (AE_AML_INVALID_RESOURCE_TYPE); 552 goto invalid_resource;
379 } 553 }
380 554
381 /* 555 /*
@@ -392,15 +566,17 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
392 ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3); 566 ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
393 } 567 }
394 568
395 /* Check validity of the resource type, zero indicates name is invalid */ 569 /*
396 570 * Check validity of the resource type, via acpi_gbl_resource_types. Zero
571 * indicates an invalid resource.
572 */
397 if (!acpi_gbl_resource_types[resource_index]) { 573 if (!acpi_gbl_resource_types[resource_index]) {
398 return (AE_AML_INVALID_RESOURCE_TYPE); 574 goto invalid_resource;
399 } 575 }
400 576
401 /* 577 /*
402 * 2) Validate the resource_length field. This ensures that the length 578 * Validate the resource_length field. This ensures that the length
403 * is at least reasonable, and guarantees that it is non-zero. 579 * is at least reasonable, and guarantees that it is non-zero.
404 */ 580 */
405 resource_length = acpi_ut_get_resource_length(aml); 581 resource_length = acpi_ut_get_resource_length(aml);
406 minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index]; 582 minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -413,7 +589,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
413 /* Fixed length resource, length must match exactly */ 589 /* Fixed length resource, length must match exactly */
414 590
415 if (resource_length != minimum_resource_length) { 591 if (resource_length != minimum_resource_length) {
416 return (AE_AML_BAD_RESOURCE_LENGTH); 592 goto bad_resource_length;
417 } 593 }
418 break; 594 break;
419 595
@@ -422,7 +598,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
422 /* Variable length resource, length must be at least the minimum */ 598 /* Variable length resource, length must be at least the minimum */
423 599
424 if (resource_length < minimum_resource_length) { 600 if (resource_length < minimum_resource_length) {
425 return (AE_AML_BAD_RESOURCE_LENGTH); 601 goto bad_resource_length;
426 } 602 }
427 break; 603 break;
428 604
@@ -432,7 +608,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
432 608
433 if ((resource_length > minimum_resource_length) || 609 if ((resource_length > minimum_resource_length) ||
434 (resource_length < (minimum_resource_length - 1))) { 610 (resource_length < (minimum_resource_length - 1))) {
435 return (AE_AML_BAD_RESOURCE_LENGTH); 611 goto bad_resource_length;
436 } 612 }
437 break; 613 break;
438 614
@@ -440,7 +616,23 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
440 616
441 /* Shouldn't happen (because of validation earlier), but be sure */ 617 /* Shouldn't happen (because of validation earlier), but be sure */
442 618
443 return (AE_AML_INVALID_RESOURCE_TYPE); 619 goto invalid_resource;
620 }
621
622 aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
623 if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
624
625 /* Validate the bus_type field */
626
627 if ((aml_resource->common_serial_bus.type == 0) ||
628 (aml_resource->common_serial_bus.type >
629 AML_RESOURCE_MAX_SERIALBUSTYPE)) {
630 ACPI_RESOURCE_ERROR((AE_INFO,
631 "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
632 aml_resource->common_serial_bus.
633 type));
634 return (AE_AML_INVALID_RESOURCE_TYPE);
635 }
444 } 636 }
445 637
446 /* Optionally return the resource table index */ 638 /* Optionally return the resource table index */
@@ -450,6 +642,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
450 } 642 }
451 643
452 return (AE_OK); 644 return (AE_OK);
645
646 invalid_resource:
647
648 ACPI_RESOURCE_ERROR((AE_INFO,
649 "Invalid/unsupported resource descriptor: Type 0x%2.2X",
650 resource_type));
651 return (AE_AML_INVALID_RESOURCE_TYPE);
652
653 bad_resource_length:
654
655 ACPI_RESOURCE_ERROR((AE_INFO,
656 "Invalid resource descriptor length: Type "
657 "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
658 resource_type, resource_length,
659 minimum_resource_length));
660 return (AE_AML_BAD_RESOURCE_LENGTH);
453} 661}
454 662
455/******************************************************************************* 663/*******************************************************************************
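
acpi_ut_walk_aml_resources() now appends a synthetic end_tag = { 0x79, 0x00 } when a template lacks one. The value follows from the small-descriptor encoding (item name in bits 6:3, body length in bits 2:0 of the first byte); a standalone check, independent of ACPICA:

#include <stdio.h>

int main(void)
{
        unsigned char end_tag[2] = { 0x79, 0x00 };
        unsigned char item = (end_tag[0] >> 3) & 0x0F;  /* 0x0F = EndTag */
        unsigned char len  = end_tag[0] & 0x07;         /* one-byte body */

        /* Prints: item 0xF, length 1, checksum 0x00 */
        printf("item 0x%X, length %u, checksum 0x%02X\n",
               item, len, end_tag[1]);
        return 0;
}
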
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 30c21e1a9360..4267477c2797 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 420ebfe08c72..644e8c8ebc4b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
48#include "acnamesp.h" 48#include "acnamesp.h"
49#include "acdebug.h" 49#include "acdebug.h"
50#include "actables.h" 50#include "actables.h"
51#include "acinterp.h"
51 52
52#define _COMPONENT ACPI_UTILITIES 53#define _COMPONENT ACPI_UTILITIES
53ACPI_MODULE_NAME("utxface") 54ACPI_MODULE_NAME("utxface")
@@ -640,4 +641,41 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
640} 641}
641 642
642ACPI_EXPORT_SYMBOL(acpi_install_interface_handler) 643ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
644
645/*****************************************************************************
646 *
647 * FUNCTION: acpi_check_address_range
648 *
649 * PARAMETERS: space_id - Address space ID
650 * Address - Start address
651 * Length - Length
652 * Warn - TRUE if warning on overlap desired
653 *
654 * RETURN: Count of the number of conflicts detected.
655 *
656 * DESCRIPTION: Check if the input address range overlaps any of the
657 * ASL operation region address ranges.
658 *
659 ****************************************************************************/
660u32
661acpi_check_address_range(acpi_adr_space_type space_id,
662 acpi_physical_address address,
663 acpi_size length, u8 warn)
664{
665 u32 overlaps;
666 acpi_status status;
667
668 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
669 if (ACPI_FAILURE(status)) {
670 return (0);
671 }
672
673 overlaps = acpi_ut_check_address_range(space_id, address,
674 (u32)length, warn);
675
676 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
677 return (overlaps);
678}
679
680ACPI_EXPORT_SYMBOL(acpi_check_address_range)
643#endif /* !ACPI_ASL_COMPILER */ 681#endif /* !ACPI_ASL_COMPILER */
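
The new acpi_check_address_range() export lets a driver ask whether a physical window collides with any AML operation region before claiming it. A hedged caller sketch (kernel context assumed; the address and length are invented):

static int my_driver_claim_window(void)
{
        u32 conflicts;

        conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
                                             0xFED40000, 0x1000, 1 /* warn */);
        if (conflicts)
                return -EBUSY;  /* an op_region owns part of this range */

        /* safe to request/map the window here */
        return 0;
}
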
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 8d0245ec4315..52b568af1819 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2011, Intel Corp. 8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
new file mode 100644
index 000000000000..1427d191d15a
--- /dev/null
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -0,0 +1,187 @@
1/*******************************************************************************
2 *
3 * Module Name: utxfmutex - external AML mutex access functions
4 *
5 ******************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utxfmutex")
50
51/* Local prototypes */
52static acpi_status
53acpi_ut_get_mutex_object(acpi_handle handle,
54 acpi_string pathname,
55 union acpi_operand_object **ret_obj);
56
57/*******************************************************************************
58 *
59 * FUNCTION: acpi_ut_get_mutex_object
60 *
61 * PARAMETERS: Handle - Mutex or prefix handle (optional)
62 * Pathname - Mutex pathname (optional)
63 * ret_obj - Where the mutex object is returned
64 *
65 * RETURN: Status
66 *
67 * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
68 * Handle:Pathname. Either Handle or Pathname can be NULL, but
69 * not both.
70 *
71 ******************************************************************************/
72
73static acpi_status
74acpi_ut_get_mutex_object(acpi_handle handle,
75 acpi_string pathname,
76 union acpi_operand_object **ret_obj)
77{
78 struct acpi_namespace_node *mutex_node;
79 union acpi_operand_object *mutex_obj;
80 acpi_status status;
81
82 /* Parameter validation */
83
84 if (!ret_obj || (!handle && !pathname)) {
85 return (AE_BAD_PARAMETER);
86 }
87
88 /* Get a the namespace node for the mutex */
 88	 /* Get the namespace node for the mutex */
89
90 mutex_node = handle;
91 if (pathname != NULL) {
92 status = acpi_get_handle(handle, pathname,
93 ACPI_CAST_PTR(acpi_handle,
94 &mutex_node));
95 if (ACPI_FAILURE(status)) {
96 return (status);
97 }
98 }
99
100 /* Ensure that we actually have a Mutex object */
101
102 if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
103 return (AE_TYPE);
104 }
105
106 /* Get the low-level mutex object */
107
108 mutex_obj = acpi_ns_get_attached_object(mutex_node);
109 if (!mutex_obj) {
110 return (AE_NULL_OBJECT);
111 }
112
113 *ret_obj = mutex_obj;
114 return (AE_OK);
115}
116
117/*******************************************************************************
118 *
119 * FUNCTION: acpi_acquire_mutex
120 *
121 * PARAMETERS: Handle - Mutex or prefix handle (optional)
122 * Pathname - Mutex pathname (optional)
123 * Timeout - Max time to wait for the lock (millisec)
124 *
125 * RETURN: Status
126 *
127 * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
128 * AML mutex objects, and allows for transaction locking between
129 * drivers and AML code. The mutex node is pointed to by
130 * Handle:Pathname. Either Handle or Pathname can be NULL, but
131 * not both.
132 *
133 ******************************************************************************/
134
135acpi_status
136acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
137{
138 acpi_status status;
139 union acpi_operand_object *mutex_obj;
140
141 /* Get the low-level mutex associated with Handle:Pathname */
142
143 status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
144 if (ACPI_FAILURE(status)) {
145 return (status);
146 }
147
148 /* Acquire the OS mutex */
149
150 status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
151 return (status);
152}
153
154/*******************************************************************************
155 *
156 * FUNCTION: acpi_release_mutex
157 *
158 * PARAMETERS: Handle - Mutex or prefix handle (optional)
159 * Pathname - Mutex pathname (optional)
160 *
161 * RETURN: Status
162 *
163 * DESCRIPTION: Release an AML mutex. This is a device driver interface to
164 * AML mutex objects, and allows for transaction locking between
165 * drivers and AML code. The mutex node is pointed to by
166 * Handle:Pathname. Either Handle or Pathname can be NULL, but
167 * not both.
168 *
169 ******************************************************************************/
170
171acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
172{
173 acpi_status status;
174 union acpi_operand_object *mutex_obj;
175
176 /* Get the low-level mutex associated with Handle:Pathname */
177
178 status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
179 if (ACPI_FAILURE(status)) {
180 return (status);
181 }
182
183 /* Release the OS mutex */
184
185 acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
186 return (AE_OK);
187}
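
acpi_acquire_mutex()/acpi_release_mutex() give drivers transaction locking against AML that uses the same Mutex object. A hedged usage sketch; the mutex pathname and timeout are illustrative, not taken from the patch:

static int my_driver_locked_access(void)
{
        char path[] = "\\_SB.PCI0.MTX0";        /* hypothetical AML Mutex */
        acpi_status status;

        status = acpi_acquire_mutex(NULL, path, 1000);  /* timeout in ms */
        if (ACPI_FAILURE(status))
                return -ENODEV;

        /* ... access the resource shared with firmware ... */

        acpi_release_mutex(NULL, path);
        return 0;
}
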
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 61540360d5ce..e45350cb6ac8 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/acpi.h> 36#include <linux/acpi.h>
37#include <linux/acpi_io.h>
37#include <linux/slab.h> 38#include <linux/slab.h>
38#include <linux/io.h> 39#include <linux/io.h>
39#include <linux/kref.h> 40#include <linux/kref.h>
40#include <linux/rculist.h> 41#include <linux/rculist.h>
41#include <linux/interrupt.h> 42#include <linux/interrupt.h>
42#include <linux/debugfs.h> 43#include <linux/debugfs.h>
43#include <acpi/atomicio.h>
44 44
45#include "apei-internal.h" 45#include "apei-internal.h"
46 46
@@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
70{ 70{
71 int rc; 71 int rc;
72 72
73 rc = acpi_atomic_read(val, &entry->register_region); 73 rc = apei_read(val, &entry->register_region);
74 if (rc) 74 if (rc)
75 return rc; 75 return rc;
76 *val >>= entry->register_region.bit_offset; 76 *val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
116 val <<= entry->register_region.bit_offset; 116 val <<= entry->register_region.bit_offset;
117 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { 117 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
118 u64 valr = 0; 118 u64 valr = 0;
119 rc = acpi_atomic_read(&valr, &entry->register_region); 119 rc = apei_read(&valr, &entry->register_region);
120 if (rc) 120 if (rc)
121 return rc; 121 return rc;
122 valr &= ~(entry->mask << entry->register_region.bit_offset); 122 valr &= ~(entry->mask << entry->register_region.bit_offset);
123 val |= valr; 123 val |= valr;
124 } 124 }
125 rc = acpi_atomic_write(val, &entry->register_region); 125 rc = apei_write(val, &entry->register_region);
126 126
127 return rc; 127 return rc;
128} 128}
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
243 u8 ins = entry->instruction; 243 u8 ins = entry->instruction;
244 244
245 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) 245 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
246 return acpi_pre_map_gar(&entry->register_region); 246 return acpi_os_map_generic_address(&entry->register_region);
247 247
248 return 0; 248 return 0;
249} 249}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
276 u8 ins = entry->instruction; 276 u8 ins = entry->instruction;
277 277
278 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) 278 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
279 acpi_post_unmap_gar(&entry->register_region); 279 acpi_os_unmap_generic_address(&entry->register_region);
280 280
281 return 0; 281 return 0;
282} 282}
@@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1,
421 return 0; 421 return 0;
422} 422}
423 423
424int apei_resources_add(struct apei_resources *resources,
425 unsigned long start, unsigned long size,
426 bool iomem)
427{
428 if (iomem)
429 return apei_res_add(&resources->iomem, start, size);
430 else
431 return apei_res_add(&resources->ioport, start, size);
432}
433EXPORT_SYMBOL_GPL(apei_resources_add);
434
424/* 435/*
425 * EINJ has two groups of GARs (EINJ table entry and trigger table 436 * EINJ has two groups of GARs (EINJ table entry and trigger table
426 * entry), so common resources are subtracted from the trigger table 437 * entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1,
438} 449}
439EXPORT_SYMBOL_GPL(apei_resources_sub); 450EXPORT_SYMBOL_GPL(apei_resources_sub);
440 451
452static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
453{
454 struct apei_resources *resources = data;
455 return apei_res_add(&resources->iomem, start, size);
456}
457
458static int apei_get_nvs_resources(struct apei_resources *resources)
459{
460 return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
461}
462
441/* 463/*
442 * IO memory/port rersource management mechanism is used to check 464 * IO memory/port resource management mechanism is used to check
443 * whether memory/port area used by GARs conflicts with normal memory 465 * whether memory/port area used by GARs conflicts with normal memory
444 * or IO memory/port of devices. 466 * or IO memory/port of devices.
445 */ 467 */
@@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources,
448{ 470{
449 struct apei_res *res, *res_bak = NULL; 471 struct apei_res *res, *res_bak = NULL;
450 struct resource *r; 472 struct resource *r;
473 struct apei_resources nvs_resources;
451 int rc; 474 int rc;
452 475
453 rc = apei_resources_sub(resources, &apei_resources_all); 476 rc = apei_resources_sub(resources, &apei_resources_all);
454 if (rc) 477 if (rc)
455 return rc; 478 return rc;
456 479
480 /*
481 * Some firmware uses ACPI NVS region, that has been marked as
482 * busy, so exclude it from APEI resources to avoid false
483 * conflict.
484 */
485 apei_resources_init(&nvs_resources);
486 rc = apei_get_nvs_resources(&nvs_resources);
487 if (rc)
488 goto res_fini;
489 rc = apei_resources_sub(resources, &nvs_resources);
490 if (rc)
491 goto res_fini;
492
457 rc = -EINVAL; 493 rc = -EINVAL;
458 list_for_each_entry(res, &resources->iomem, list) { 494 list_for_each_entry(res, &resources->iomem, list) {
459 r = request_mem_region(res->start, res->end - res->start, 495 r = request_mem_region(res->start, res->end - res->start,
460 desc); 496 desc);
461 if (!r) { 497 if (!r) {
462 pr_err(APEI_PFX 498 pr_err(APEI_PFX
463 "Can not request iomem region <%016llx-%016llx> for GARs.\n", 499 "Can not request [mem %#010llx-%#010llx] for %s registers\n",
464 (unsigned long long)res->start, 500 (unsigned long long)res->start,
465 (unsigned long long)res->end); 501 (unsigned long long)res->end - 1, desc);
466 res_bak = res; 502 res_bak = res;
467 goto err_unmap_iomem; 503 goto err_unmap_iomem;
468 } 504 }
@@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources,
472 r = request_region(res->start, res->end - res->start, desc); 508 r = request_region(res->start, res->end - res->start, desc);
473 if (!r) { 509 if (!r) {
474 pr_err(APEI_PFX 510 pr_err(APEI_PFX
475 "Can not request ioport region <%016llx-%016llx> for GARs.\n", 511 "Can not request [io %#06llx-%#06llx] for %s registers\n",
476 (unsigned long long)res->start, 512 (unsigned long long)res->start,
477 (unsigned long long)res->end); 513 (unsigned long long)res->end - 1, desc);
478 res_bak = res; 514 res_bak = res;
479 goto err_unmap_ioport; 515 goto err_unmap_ioport;
480 } 516 }
@@ -500,6 +536,8 @@ err_unmap_iomem:
500 break; 536 break;
501 release_mem_region(res->start, res->end - res->start); 537 release_mem_region(res->start, res->end - res->start);
502 } 538 }
539res_fini:
540 apei_resources_fini(&nvs_resources);
503 return rc; 541 return rc;
504} 542}
505EXPORT_SYMBOL_GPL(apei_resources_request); 543EXPORT_SYMBOL_GPL(apei_resources_request);
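
The NVS exclusion added above boils down to set subtraction over address ranges: any byte that belongs to an NVS region is removed from the ranges APEI will try to request. A minimal, self-contained sketch of that interval arithmetic (it models the idea only, not the kernel's apei_res list implementation; the example addresses are made up):

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* half-open: [start, end) */

/*
 * Remove 'cut' from 'r'.  Up to two pieces can survive; the number of
 * surviving pieces is returned and written into out[].
 */
static int range_sub(struct range r, struct range cut, struct range out[2])
{
	int n = 0;

	if (cut.end <= r.start || cut.start >= r.end) {	/* no overlap */
		out[n++] = r;
		return n;
	}
	if (r.start < cut.start)
		out[n++] = (struct range){ r.start, cut.start };
	if (cut.end < r.end)
		out[n++] = (struct range){ cut.end, r.end };
	return n;
}

int main(void)
{
	/* A GAR range with a hypothetical NVS region punched out of the middle */
	struct range gar = { 0x1000, 0x2000 }, nvs = { 0x1400, 0x1800 }, out[2];
	int i, n = range_sub(gar, nvs, out);

	for (i = 0; i < n; i++)
		printf("[%#llx-%#llx)\n",
		       (unsigned long long)out[i].start,
		       (unsigned long long)out[i].end);
	return 0;
}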
@@ -553,6 +591,96 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
553 return 0; 591 return 0;
554} 592}
555 593
594/* read GAR in interrupt (including NMI) or process context */
595int apei_read(u64 *val, struct acpi_generic_address *reg)
596{
597 int rc;
598 u64 address;
599 u32 tmp, width = reg->bit_width;
600 acpi_status status;
601
602 rc = apei_check_gar(reg, &address);
603 if (rc)
604 return rc;
605
606 if (width == 64)
607 width = 32; /* Break into two 32-bit transfers */
608
609 *val = 0;
610 switch(reg->space_id) {
611 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
612 status = acpi_os_read_memory((acpi_physical_address)
613 address, &tmp, width);
614 if (ACPI_FAILURE(status))
615 return -EIO;
616 *val = tmp;
617
618 if (reg->bit_width == 64) {
619 /* Read the top 32 bits */
620 status = acpi_os_read_memory((acpi_physical_address)
621 (address + 4), &tmp, 32);
622 if (ACPI_FAILURE(status))
623 return -EIO;
624 *val |= ((u64)tmp << 32);
625 }
626 break;
627 case ACPI_ADR_SPACE_SYSTEM_IO:
628 status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
629 if (ACPI_FAILURE(status))
630 return -EIO;
631 break;
632 default:
633 return -EINVAL;
634 }
635
636 return 0;
637}
638EXPORT_SYMBOL_GPL(apei_read);
639
640/* write GAR in interrupt (including NMI) or process context */
641int apei_write(u64 val, struct acpi_generic_address *reg)
642{
643 int rc;
644 u64 address;
645 u32 width = reg->bit_width;
646 acpi_status status;
647
648 rc = apei_check_gar(reg, &address);
649 if (rc)
650 return rc;
651
652 if (width == 64)
653 width = 32; /* Break into two 32-bit transfers */
654
655 switch (reg->space_id) {
656 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
657 status = acpi_os_write_memory((acpi_physical_address)
658 address, ACPI_LODWORD(val),
659 width);
660 if (ACPI_FAILURE(status))
661 return -EIO;
662
663 if (reg->bit_width == 64) {
664 status = acpi_os_write_memory((acpi_physical_address)
665 (address + 4),
666 ACPI_HIDWORD(val), 32);
667 if (ACPI_FAILURE(status))
668 return -EIO;
669 }
670 break;
671 case ACPI_ADR_SPACE_SYSTEM_IO:
672 status = acpi_os_write_port(address, val, reg->bit_width);
673 if (ACPI_FAILURE(status))
674 return -EIO;
675 break;
676 default:
677 return -EINVAL;
678 }
679
680 return 0;
681}
682EXPORT_SYMBOL_GPL(apei_write);
683
556static int collect_res_callback(struct apei_exec_context *ctx, 684static int collect_res_callback(struct apei_exec_context *ctx,
557 struct acpi_whea_header *entry, 685 struct acpi_whea_header *entry,
558 void *data) 686 void *data)
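
apei_read() and apei_write() above break 64-bit memory-space accesses into two 32-bit transfers, reading the low dword at the GAR address and the high dword at address + 4. A self-contained sketch of just that splitting and recombination (plain buffer accesses stand in for the MMIO helpers):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-ins for 32-bit wide register accesses at an "address" */
static uint32_t read32(const uint8_t *base, uint64_t off)
{
	uint32_t v;
	memcpy(&v, base + off, sizeof(v));
	return v;
}

static void write32(uint8_t *base, uint64_t off, uint32_t v)
{
	memcpy(base + off, &v, sizeof(v));
}

/* 64-bit access built from two 32-bit transfers, low dword first */
static uint64_t read64_split(const uint8_t *base, uint64_t off)
{
	uint64_t val = read32(base, off);

	val |= (uint64_t)read32(base, off + 4) << 32;
	return val;
}

static void write64_split(uint8_t *base, uint64_t off, uint64_t val)
{
	write32(base, off, (uint32_t)val);		/* low dword  */
	write32(base, off + 4, (uint32_t)(val >> 32));	/* high dword */
}

int main(void)
{
	uint8_t regs[16] = { 0 };

	write64_split(regs, 8, 0x1122334455667788ULL);
	printf("%#llx\n", (unsigned long long)read64_split(regs, 8));
	return 0;
}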
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index f57050e7a5e7..cca240a33038 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -68,6 +68,9 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
68/* IP has been set in instruction function */ 68/* IP has been set in instruction function */
69#define APEI_EXEC_SET_IP 1 69#define APEI_EXEC_SET_IP 1
70 70
71int apei_read(u64 *val, struct acpi_generic_address *reg);
72int apei_write(u64 val, struct acpi_generic_address *reg);
73
71int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val); 74int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
72int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val); 75int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
73int apei_exec_read_register(struct apei_exec_context *ctx, 76int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -95,6 +98,9 @@ static inline void apei_resources_init(struct apei_resources *resources)
95} 98}
96 99
97void apei_resources_fini(struct apei_resources *resources); 100void apei_resources_fini(struct apei_resources *resources);
101int apei_resources_add(struct apei_resources *resources,
102 unsigned long start, unsigned long size,
103 bool iomem);
98int apei_resources_sub(struct apei_resources *resources1, 104int apei_resources_sub(struct apei_resources *resources1,
99 struct apei_resources *resources2); 105 struct apei_resources *resources2);
100int apei_resources_request(struct apei_resources *resources, 106int apei_resources_request(struct apei_resources *resources,
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 589b96c38704..5b898d4dda99 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -43,6 +43,42 @@
43#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) 43#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
44 44
45/* 45/*
46 * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
47 */
48static int acpi5;
49
50struct set_error_type_with_address {
51 u32 type;
52 u32 vendor_extension;
53 u32 flags;
54 u32 apicid;
55 u64 memory_address;
56 u64 memory_address_range;
57 u32 pcie_sbdf;
58};
59enum {
60 SETWA_FLAGS_APICID = 1,
61 SETWA_FLAGS_MEM = 2,
62 SETWA_FLAGS_PCIE_SBDF = 4,
63};
64
65/*
66 * Vendor extensions for platform specific operations
67 */
68struct vendor_error_type_extension {
69 u32 length;
70 u32 pcie_sbdf;
71 u16 vendor_id;
72 u16 device_id;
73 u8 rev_id;
74 u8 reserved[3];
75};
76
77static u32 vendor_flags;
78static struct debugfs_blob_wrapper vendor_blob;
79static char vendor_dev[64];
80
81/*
46 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the 82 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
47 * EINJ table through an unpublished extension. Use with caution as 83 * EINJ table through an unpublished extension. Use with caution as
48 * most will ignore the parameter and make their own choice of address 84 * most will ignore the parameter and make their own choice of address
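
The SET_ERROR_TYPE_WITH_ADDRESS block declared above is later poked field by field with readl()/writel()/writeq(), so the assumed field offsets matter. A self-contained sketch that prints the offsets this C declaration implies (natural alignment is assumed here; the real block lives in firmware-provided memory, so a layout mismatch would corrupt neighbouring fields):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct set_error_type_with_address {
	uint32_t type;
	uint32_t vendor_extension;
	uint32_t flags;
	uint32_t apicid;
	uint64_t memory_address;
	uint64_t memory_address_range;
	uint32_t pcie_sbdf;
};

int main(void)
{
#define SHOW(f) printf("%-21s offset %2zu\n", #f, \
		       offsetof(struct set_error_type_with_address, f))
	SHOW(type);
	SHOW(vendor_extension);
	SHOW(flags);
	SHOW(apicid);
	SHOW(memory_address);
	SHOW(memory_address_range);
	SHOW(pcie_sbdf);
	return 0;
}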
@@ -103,7 +139,14 @@ static struct apei_exec_ins_type einj_ins_type[] = {
103 */ 139 */
104static DEFINE_MUTEX(einj_mutex); 140static DEFINE_MUTEX(einj_mutex);
105 141
106static struct einj_parameter *einj_param; 142static void *einj_param;
143
144#ifndef readq
145static inline __u64 readq(volatile void __iomem *addr)
146{
147 return ((__u64)readl(addr+4) << 32) + readl(addr);
148}
149#endif
107 150
108#ifndef writeq 151#ifndef writeq
109static inline void writeq(__u64 val, volatile void __iomem *addr) 152static inline void writeq(__u64 val, volatile void __iomem *addr)
@@ -158,10 +201,31 @@ static int einj_timedout(u64 *t)
158 return 0; 201 return 0;
159} 202}
160 203
161static u64 einj_get_parameter_address(void) 204static void check_vendor_extension(u64 paddr,
205 struct set_error_type_with_address *v5param)
206{
207 int offset = readl(&v5param->vendor_extension);
208 struct vendor_error_type_extension *v;
209 u32 sbdf;
210
211 if (!offset)
212 return;
213 v = ioremap(paddr + offset, sizeof(*v));
214 if (!v)
215 return;
216 sbdf = readl(&v->pcie_sbdf);
217 sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
218 sbdf >> 24, (sbdf >> 16) & 0xff,
219 (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
220 readw(&v->vendor_id), readw(&v->device_id),
221 readb(&v->rev_id));
222 iounmap(v);
223}
224
225static void *einj_get_parameter_address(void)
162{ 226{
163 int i; 227 int i;
164 u64 paddr = 0; 228 u64 paddrv4 = 0, paddrv5 = 0;
165 struct acpi_whea_header *entry; 229 struct acpi_whea_header *entry;
166 230
167 entry = EINJ_TAB_ENTRY(einj_tab); 231 entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +234,40 @@ static u64 einj_get_parameter_address(void)
170 entry->instruction == ACPI_EINJ_WRITE_REGISTER && 234 entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
171 entry->register_region.space_id == 235 entry->register_region.space_id ==
172 ACPI_ADR_SPACE_SYSTEM_MEMORY) 236 ACPI_ADR_SPACE_SYSTEM_MEMORY)
173 memcpy(&paddr, &entry->register_region.address, 237 memcpy(&paddrv4, &entry->register_region.address,
174 sizeof(paddr)); 238 sizeof(paddrv4));
239 if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
240 entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
241 entry->register_region.space_id ==
242 ACPI_ADR_SPACE_SYSTEM_MEMORY)
243 memcpy(&paddrv5, &entry->register_region.address,
244 sizeof(paddrv5));
175 entry++; 245 entry++;
176 } 246 }
247 if (paddrv5) {
248 struct set_error_type_with_address *v5param;
249
250 v5param = ioremap(paddrv5, sizeof(*v5param));
251 if (v5param) {
252 acpi5 = 1;
253 check_vendor_extension(paddrv5, v5param);
254 return v5param;
255 }
256 }
257 if (paddrv4) {
258 struct einj_parameter *v4param;
259
260 v4param = ioremap(paddrv4, sizeof(*v4param));
261 if (!v4param)
262 return 0;
263 if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
264 iounmap(v4param);
265 return 0;
266 }
267 return v4param;
268 }
177 269
178 return paddr; 270 return 0;
179} 271}
180 272
181/* do sanity check to trigger table */ 273/* do sanity check to trigger table */
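
check_vendor_extension() above unpacks the 32-bit pcie_sbdf word into segment, bus, device and function for the debugfs "vendor" blob. The same bit layout in a standalone sketch (segment in bits 31:24, bus 23:16, device 15:11, function 10:8, as the shifts in the patch imply; the test value is made up):

#include <stdio.h>
#include <stdint.h>

static void decode_sbdf(uint32_t sbdf)
{
	unsigned int seg = sbdf >> 24;
	unsigned int bus = (sbdf >> 16) & 0xff;
	unsigned int dev = (sbdf >> 11) & 0x1f;
	unsigned int fn  = (sbdf >> 8) & 0x7;

	printf("%x:%x:%x.%x\n", seg, bus, dev, fn);
}

int main(void)
{
	decode_sbdf(0x00031000);	/* hypothetical: seg 0, bus 3, dev 2, fn 0 */
	return 0;
}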
@@ -194,8 +286,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
194 return 0; 286 return 0;
195} 287}
196 288
289static struct acpi_generic_address *einj_get_trigger_parameter_region(
290 struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
291{
292 int i;
293 struct acpi_whea_header *entry;
294
295 entry = (struct acpi_whea_header *)
296 ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
297 for (i = 0; i < trigger_tab->entry_count; i++) {
298 if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
299 entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
300 entry->register_region.space_id ==
301 ACPI_ADR_SPACE_SYSTEM_MEMORY &&
302 (entry->register_region.address & param2) == (param1 & param2))
303 return &entry->register_region;
304 entry++;
305 }
306
307 return NULL;
308}
197/* Execute instructions in trigger error action table */ 309/* Execute instructions in trigger error action table */
198static int __einj_error_trigger(u64 trigger_paddr) 310static int __einj_error_trigger(u64 trigger_paddr, u32 type,
311 u64 param1, u64 param2)
199{ 312{
200 struct acpi_einj_trigger *trigger_tab = NULL; 313 struct acpi_einj_trigger *trigger_tab = NULL;
201 struct apei_exec_context trigger_ctx; 314 struct apei_exec_context trigger_ctx;
@@ -204,14 +317,16 @@ static int __einj_error_trigger(u64 trigger_paddr)
204 struct resource *r; 317 struct resource *r;
205 u32 table_size; 318 u32 table_size;
206 int rc = -EIO; 319 int rc = -EIO;
320 struct acpi_generic_address *trigger_param_region = NULL;
207 321
208 r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), 322 r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
209 "APEI EINJ Trigger Table"); 323 "APEI EINJ Trigger Table");
210 if (!r) { 324 if (!r) {
211 pr_err(EINJ_PFX 325 pr_err(EINJ_PFX
212 "Can not request iomem region <%016llx-%016llx> for Trigger table.\n", 326 "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
213 (unsigned long long)trigger_paddr, 327 (unsigned long long)trigger_paddr,
214 (unsigned long long)trigger_paddr+sizeof(*trigger_tab)); 328 (unsigned long long)trigger_paddr +
329 sizeof(*trigger_tab) - 1);
215 goto out; 330 goto out;
216 } 331 }
217 trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); 332 trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -232,9 +347,9 @@ static int __einj_error_trigger(u64 trigger_paddr)
232 "APEI EINJ Trigger Table"); 347 "APEI EINJ Trigger Table");
233 if (!r) { 348 if (!r) {
234 pr_err(EINJ_PFX 349 pr_err(EINJ_PFX
235"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n", 350"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
236 (unsigned long long)trigger_paddr+sizeof(*trigger_tab), 351 (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
237 (unsigned long long)trigger_paddr + table_size); 352 (unsigned long long)trigger_paddr + table_size - 1);
238 goto out_rel_header; 353 goto out_rel_header;
239 } 354 }
240 iounmap(trigger_tab); 355 iounmap(trigger_tab);
@@ -255,6 +370,30 @@ static int __einj_error_trigger(u64 trigger_paddr)
255 rc = apei_resources_sub(&trigger_resources, &einj_resources); 370 rc = apei_resources_sub(&trigger_resources, &einj_resources);
256 if (rc) 371 if (rc)
257 goto out_fini; 372 goto out_fini;
373 /*
374 * Some firmware will access target address specified in
375 * param1 to trigger the error when injecting memory error.
376 * This will cause resource conflict with regular memory. So
377 * remove it from trigger table resources.
378 */
379 if (param_extension && (type & 0x0038) && param2) {
380 struct apei_resources addr_resources;
381 apei_resources_init(&addr_resources);
382 trigger_param_region = einj_get_trigger_parameter_region(
383 trigger_tab, param1, param2);
384 if (trigger_param_region) {
385 rc = apei_resources_add(&addr_resources,
386 trigger_param_region->address,
387 trigger_param_region->bit_width/8, true);
388 if (rc)
389 goto out_fini;
390 rc = apei_resources_sub(&trigger_resources,
391 &addr_resources);
392 }
393 apei_resources_fini(&addr_resources);
394 if (rc)
395 goto out_fini;
396 }
258 rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); 397 rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
259 if (rc) 398 if (rc)
260 goto out_fini; 399 goto out_fini;
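
The trigger-table fix above matches a register against the injection target by treating param2 as an address mask: an entry matches when its register address and param1 agree on every bit set in param2. A small self-contained illustration of that test (the addresses and mask are made up):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* True if 'reg_addr' and 'param1' agree on every bit set in the mask 'param2' */
static bool addr_matches(uint64_t reg_addr, uint64_t param1, uint64_t param2)
{
	return (reg_addr & param2) == (param1 & param2);
}

int main(void)
{
	uint64_t param1 = 0x12345000;	/* hypothetical injection target */
	uint64_t param2 = ~0xfffULL;	/* ignore the low 12 bits */

	printf("%d\n", addr_matches(0x12345abc, param1, param2));	/* 1 */
	printf("%d\n", addr_matches(0x12346000, param1, param2));	/* 0 */
	return 0;
}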
@@ -293,12 +432,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
293 if (rc) 432 if (rc)
294 return rc; 433 return rc;
295 apei_exec_ctx_set_input(&ctx, type); 434 apei_exec_ctx_set_input(&ctx, type);
296 rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); 435 if (acpi5) {
297 if (rc) 436 struct set_error_type_with_address *v5param = einj_param;
298 return rc; 437
299 if (einj_param) { 438 writel(type, &v5param->type);
300 writeq(param1, &einj_param->param1); 439 if (type & 0x80000000) {
301 writeq(param2, &einj_param->param2); 440 switch (vendor_flags) {
441 case SETWA_FLAGS_APICID:
442 writel(param1, &v5param->apicid);
443 break;
444 case SETWA_FLAGS_MEM:
445 writeq(param1, &v5param->memory_address);
446 writeq(param2, &v5param->memory_address_range);
447 break;
448 case SETWA_FLAGS_PCIE_SBDF:
449 writel(param1, &v5param->pcie_sbdf);
450 break;
451 }
452 writel(vendor_flags, &v5param->flags);
453 } else {
454 switch (type) {
455 case ACPI_EINJ_PROCESSOR_CORRECTABLE:
456 case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
457 case ACPI_EINJ_PROCESSOR_FATAL:
458 writel(param1, &v5param->apicid);
459 writel(SETWA_FLAGS_APICID, &v5param->flags);
460 break;
461 case ACPI_EINJ_MEMORY_CORRECTABLE:
462 case ACPI_EINJ_MEMORY_UNCORRECTABLE:
463 case ACPI_EINJ_MEMORY_FATAL:
464 writeq(param1, &v5param->memory_address);
465 writeq(param2, &v5param->memory_address_range);
466 writel(SETWA_FLAGS_MEM, &v5param->flags);
467 break;
468 case ACPI_EINJ_PCIX_CORRECTABLE:
469 case ACPI_EINJ_PCIX_UNCORRECTABLE:
470 case ACPI_EINJ_PCIX_FATAL:
471 writel(param1, &v5param->pcie_sbdf);
472 writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
473 break;
474 }
475 }
476 } else {
477 rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
478 if (rc)
479 return rc;
480 if (einj_param) {
481 struct einj_parameter *v4param = einj_param;
482 writeq(param1, &v4param->param1);
483 writeq(param2, &v4param->param2);
484 }
302 } 485 }
303 rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); 486 rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
304 if (rc) 487 if (rc)
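
In the acpi5 branch above, the error type decides which parameter fields are meaningful. A standalone sketch of that mapping; the bit masks below assume the usual EINJ error-type assignments (bits 0-2 processor, 3-5 memory, 6-8 PCI Express) and stand in for the ACPI_EINJ_* constants used by the real code:

#include <stdio.h>
#include <stdint.h>

enum { SETWA_FLAGS_APICID = 1, SETWA_FLAGS_MEM = 2, SETWA_FLAGS_PCIE_SBDF = 4 };

/* Map an EINJ error type to the SET_ERROR_TYPE_WITH_ADDRESS flags field */
static uint32_t einj_type_to_flags(uint32_t type)
{
	if (type & 0x80000000)	/* vendor-defined: caller supplies the flags */
		return 0;
	if (type & 0x0007)	/* processor correctable/uncorrectable/fatal */
		return SETWA_FLAGS_APICID;
	if (type & 0x0038)	/* memory correctable/uncorrectable/fatal */
		return SETWA_FLAGS_MEM;
	if (type & 0x01c0)	/* PCIe correctable/uncorrectable/fatal */
		return SETWA_FLAGS_PCIE_SBDF;
	return 0;
}

int main(void)
{
	printf("%u %u %u\n",
	       einj_type_to_flags(0x08),	/* memory correctable    -> 2 */
	       einj_type_to_flags(0x01),	/* processor correctable -> 1 */
	       einj_type_to_flags(0x40));	/* PCIe correctable      -> 4 */
	return 0;
}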
@@ -324,7 +507,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
324 if (rc) 507 if (rc)
325 return rc; 508 return rc;
326 trigger_paddr = apei_exec_ctx_get_output(&ctx); 509 trigger_paddr = apei_exec_ctx_get_output(&ctx);
327 rc = __einj_error_trigger(trigger_paddr); 510 rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
328 if (rc) 511 if (rc)
329 return rc; 512 return rc;
330 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); 513 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -408,15 +591,25 @@ static int error_type_set(void *data, u64 val)
408{ 591{
409 int rc; 592 int rc;
410 u32 available_error_type = 0; 593 u32 available_error_type = 0;
594 u32 tval, vendor;
595
596 /*
597 * Vendor defined types have 0x80000000 bit set, and
598 * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
599 */
600 vendor = val & 0x80000000;
601 tval = val & 0x7fffffff;
411 602
412 /* Only one error type can be specified */ 603 /* Only one error type can be specified */
413 if (val & (val - 1)) 604 if (tval & (tval - 1))
414 return -EINVAL;
415 rc = einj_get_available_error_type(&available_error_type);
416 if (rc)
417 return rc;
418 if (!(val & available_error_type))
419 return -EINVAL; 605 return -EINVAL;
606 if (!vendor) {
607 rc = einj_get_available_error_type(&available_error_type);
608 if (rc)
609 return rc;
610 if (!(val & available_error_type))
611 return -EINVAL;
612 }
420 error_type = val; 613 error_type = val;
421 614
422 return 0; 615 return 0;
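
error_type_set() above strips the vendor bit before checking that only one error type was requested; 'tval & (tval - 1)' is non-zero exactly when more than one bit is set. The validation in a self-contained form:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Accept a value with at most one standard error-type bit set;
 * bit 31 marks a vendor-defined type and is excluded from the check. */
static bool error_type_valid(uint64_t val)
{
	uint32_t vendor = val & 0x80000000;
	uint32_t tval = val & 0x7fffffff;

	if (tval & (tval - 1))		/* more than one bit set */
		return false;
	return vendor || tval;		/* something must be selected */
}

int main(void)
{
	printf("%d\n", error_type_valid(0x00000008));	/* one type: ok       */
	printf("%d\n", error_type_valid(0x00000018));	/* two types: rejected */
	printf("%d\n", error_type_valid(0x80000000));	/* vendor-defined: ok  */
	return 0;
}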
@@ -455,7 +648,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
455static int __init einj_init(void) 648static int __init einj_init(void)
456{ 649{
457 int rc; 650 int rc;
458 u64 param_paddr;
459 acpi_status status; 651 acpi_status status;
460 struct dentry *fentry; 652 struct dentry *fentry;
461 struct apei_exec_context ctx; 653 struct apei_exec_context ctx;
@@ -465,10 +657,9 @@ static int __init einj_init(void)
465 657
466 status = acpi_get_table(ACPI_SIG_EINJ, 0, 658 status = acpi_get_table(ACPI_SIG_EINJ, 0,
467 (struct acpi_table_header **)&einj_tab); 659 (struct acpi_table_header **)&einj_tab);
468 if (status == AE_NOT_FOUND) { 660 if (status == AE_NOT_FOUND)
469 pr_info(EINJ_PFX "Table is not found!\n");
470 return -ENODEV; 661 return -ENODEV;
471 } else if (ACPI_FAILURE(status)) { 662 else if (ACPI_FAILURE(status)) {
472 const char *msg = acpi_format_exception(status); 663 const char *msg = acpi_format_exception(status);
473 pr_err(EINJ_PFX "Failed to get table, %s\n", msg); 664 pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
474 return -EINVAL; 665 return -EINVAL;
@@ -509,23 +700,30 @@ static int __init einj_init(void)
509 rc = apei_exec_pre_map_gars(&ctx); 700 rc = apei_exec_pre_map_gars(&ctx);
510 if (rc) 701 if (rc)
511 goto err_release; 702 goto err_release;
512 if (param_extension) { 703
513 param_paddr = einj_get_parameter_address(); 704 einj_param = einj_get_parameter_address();
514 if (param_paddr) { 705 if ((param_extension || acpi5) && einj_param) {
515 einj_param = ioremap(param_paddr, sizeof(*einj_param)); 706 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
516 rc = -ENOMEM; 707 einj_debug_dir, &error_param1);
517 if (!einj_param) 708 if (!fentry)
518 goto err_unmap; 709 goto err_unmap;
519 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, 710 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
520 einj_debug_dir, &error_param1); 711 einj_debug_dir, &error_param2);
521 if (!fentry) 712 if (!fentry)
522 goto err_unmap; 713 goto err_unmap;
523 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, 714 }
524 einj_debug_dir, &error_param2); 715
525 if (!fentry) 716 if (vendor_dev[0]) {
526 goto err_unmap; 717 vendor_blob.data = vendor_dev;
527 } else 718 vendor_blob.size = strlen(vendor_dev);
528 pr_warn(EINJ_PFX "Parameter extension is not supported.\n"); 719 fentry = debugfs_create_blob("vendor", S_IRUSR,
720 einj_debug_dir, &vendor_blob);
721 if (!fentry)
722 goto err_unmap;
723 fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
724 einj_debug_dir, &vendor_flags);
725 if (!fentry)
726 goto err_unmap;
529 } 727 }
530 728
531 pr_info(EINJ_PFX "Error INJection is initialized.\n"); 729 pr_info(EINJ_PFX "Error INJection is initialized.\n");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6a9e3bad13f4..eb9fab5b96e4 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1127,10 +1127,9 @@ static int __init erst_init(void)
1127 1127
1128 status = acpi_get_table(ACPI_SIG_ERST, 0, 1128 status = acpi_get_table(ACPI_SIG_ERST, 0,
1129 (struct acpi_table_header **)&erst_tab); 1129 (struct acpi_table_header **)&erst_tab);
1130 if (status == AE_NOT_FOUND) { 1130 if (status == AE_NOT_FOUND)
1131 pr_info(ERST_PFX "Table is not found!\n");
1132 goto err; 1131 goto err;
1133 } else if (ACPI_FAILURE(status)) { 1132 else if (ACPI_FAILURE(status)) {
1134 const char *msg = acpi_format_exception(status); 1133 const char *msg = acpi_format_exception(status);
1135 pr_err(ERST_PFX "Failed to get table, %s\n", msg); 1134 pr_err(ERST_PFX "Failed to get table, %s\n", msg);
1136 rc = -EINVAL; 1135 rc = -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ebaf037a787b..9b3cac0abecc 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/acpi.h> 35#include <linux/acpi.h>
36#include <linux/acpi_io.h>
36#include <linux/io.h> 37#include <linux/io.h>
37#include <linux/interrupt.h> 38#include <linux/interrupt.h>
38#include <linux/timer.h> 39#include <linux/timer.h>
@@ -45,8 +46,9 @@
45#include <linux/irq_work.h> 46#include <linux/irq_work.h>
46#include <linux/llist.h> 47#include <linux/llist.h>
47#include <linux/genalloc.h> 48#include <linux/genalloc.h>
49#include <linux/pci.h>
50#include <linux/aer.h>
48#include <acpi/apei.h> 51#include <acpi/apei.h>
49#include <acpi/atomicio.h>
50#include <acpi/hed.h> 52#include <acpi/hed.h>
51#include <asm/mce.h> 53#include <asm/mce.h>
52#include <asm/tlbflush.h> 54#include <asm/tlbflush.h>
@@ -299,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
299 if (!ghes) 301 if (!ghes)
300 return ERR_PTR(-ENOMEM); 302 return ERR_PTR(-ENOMEM);
301 ghes->generic = generic; 303 ghes->generic = generic;
302 rc = acpi_pre_map_gar(&generic->error_status_address); 304 rc = acpi_os_map_generic_address(&generic->error_status_address);
303 if (rc) 305 if (rc)
304 goto err_free; 306 goto err_free;
305 error_block_length = generic->error_block_length; 307 error_block_length = generic->error_block_length;
@@ -319,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
319 return ghes; 321 return ghes;
320 322
321err_unmap: 323err_unmap:
322 acpi_post_unmap_gar(&generic->error_status_address); 324 acpi_os_unmap_generic_address(&generic->error_status_address);
323err_free: 325err_free:
324 kfree(ghes); 326 kfree(ghes);
325 return ERR_PTR(rc); 327 return ERR_PTR(rc);
@@ -328,7 +330,7 @@ err_free:
328static void ghes_fini(struct ghes *ghes) 330static void ghes_fini(struct ghes *ghes)
329{ 331{
330 kfree(ghes->estatus); 332 kfree(ghes->estatus);
331 acpi_post_unmap_gar(&ghes->generic->error_status_address); 333 acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
332} 334}
333 335
334enum { 336enum {
@@ -399,7 +401,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
399 u32 len; 401 u32 len;
400 int rc; 402 int rc;
401 403
402 rc = acpi_atomic_read(&buf_paddr, &g->error_status_address); 404 rc = apei_read(&buf_paddr, &g->error_status_address);
403 if (rc) { 405 if (rc) {
404 if (!silent && printk_ratelimit()) 406 if (!silent && printk_ratelimit())
405 pr_warning(FW_WARN GHES_PFX 407 pr_warning(FW_WARN GHES_PFX
@@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
476 } 478 }
477#endif 479#endif
478 } 480 }
481#ifdef CONFIG_ACPI_APEI_PCIEAER
482 else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
483 CPER_SEC_PCIE)) {
484 struct cper_sec_pcie *pcie_err;
485 pcie_err = (struct cper_sec_pcie *)(gdata+1);
486 if (sev == GHES_SEV_RECOVERABLE &&
487 sec_sev == GHES_SEV_RECOVERABLE &&
488 pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
489 pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
490 unsigned int devfn;
491 int aer_severity;
492 devfn = PCI_DEVFN(pcie_err->device_id.device,
493 pcie_err->device_id.function);
494 aer_severity = cper_severity_to_aer(sev);
495 aer_recover_queue(pcie_err->device_id.segment,
496 pcie_err->device_id.bus,
497 devfn, aer_severity);
498 }
499
500 }
501#endif
479 } 502 }
480} 503}
481 504
@@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx,
483 const struct acpi_hest_generic *generic, 506 const struct acpi_hest_generic *generic,
484 const struct acpi_hest_generic_status *estatus) 507 const struct acpi_hest_generic_status *estatus)
485{ 508{
509 static atomic_t seqno;
510 unsigned int curr_seqno;
511 char pfx_seq[64];
512
486 if (pfx == NULL) { 513 if (pfx == NULL) {
487 if (ghes_severity(estatus->error_severity) <= 514 if (ghes_severity(estatus->error_severity) <=
488 GHES_SEV_CORRECTED) 515 GHES_SEV_CORRECTED)
489 pfx = KERN_WARNING HW_ERR; 516 pfx = KERN_WARNING;
490 else 517 else
491 pfx = KERN_ERR HW_ERR; 518 pfx = KERN_ERR;
492 } 519 }
520 curr_seqno = atomic_inc_return(&seqno);
521 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
493 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n", 522 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
494 pfx, generic->header.source_id); 523 pfx_seq, generic->header.source_id);
495 apei_estatus_print(pfx, estatus); 524 apei_estatus_print(pfx_seq, estatus);
496} 525}
497 526
498static int ghes_print_estatus(const char *pfx, 527static int ghes_print_estatus(const char *pfx,
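
The hunk above prefixes every line of a printed error record with a per-record sequence number, so interleaved multi-line reports can be reassembled from the log. A self-contained sketch of the prefix construction (a plain counter stands in for the kernel's atomic_t, and the printk severity prefix is omitted):

#include <stdio.h>

#define HW_ERR "[Hardware Error]: "	/* mirrors the kernel's HW_ERR prefix */

static unsigned int seqno;		/* the kernel uses atomic_inc_return() */

static void print_record(int source_id)
{
	char pfx_seq[64];

	/* One sequence number per record; every line of it reuses pfx_seq */
	snprintf(pfx_seq, sizeof(pfx_seq), "{%u}" HW_ERR, ++seqno);
	printf("%sHardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, source_id);
}

int main(void)
{
	print_record(0);
	print_record(1);
	return 0;
}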
@@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this,
711 return ret; 740 return ret;
712} 741}
713 742
743static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
744{
745 struct llist_node *next, *tail = NULL;
746
747 while (llnode) {
748 next = llnode->next;
749 llnode->next = tail;
750 tail = llnode;
751 llnode = next;
752 }
753
754 return tail;
755}
756
714static void ghes_proc_in_irq(struct irq_work *irq_work) 757static void ghes_proc_in_irq(struct irq_work *irq_work)
715{ 758{
716 struct llist_node *llnode, *next, *tail = NULL; 759 struct llist_node *llnode, *next;
717 struct ghes_estatus_node *estatus_node; 760 struct ghes_estatus_node *estatus_node;
718 struct acpi_hest_generic *generic; 761 struct acpi_hest_generic *generic;
719 struct acpi_hest_generic_status *estatus; 762 struct acpi_hest_generic_status *estatus;
720 u32 len, node_len; 763 u32 len, node_len;
721 764
765 llnode = llist_del_all(&ghes_estatus_llist);
722 /* 766 /*
723 * Because the time order of estatus in list is reversed, 767 * Because the time order of estatus in list is reversed,
724 * revert it back to proper order. 768 * revert it back to proper order.
725 */ 769 */
726 llnode = llist_del_all(&ghes_estatus_llist); 770 llnode = llist_nodes_reverse(llnode);
727 while (llnode) {
728 next = llnode->next;
729 llnode->next = tail;
730 tail = llnode;
731 llnode = next;
732 }
733 llnode = tail;
734 while (llnode) { 771 while (llnode) {
735 next = llnode->next; 772 next = llnode->next;
736 estatus_node = llist_entry(llnode, struct ghes_estatus_node, 773 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
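
llist_nodes_reverse() above is an ordinary in-place reversal of a singly linked list, factored out so that both the irq_work path and the new NMI print path can restore chronological order. The same loop in a self-contained form:

#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Reverse a singly linked list in place and return the new head */
static struct node *list_reverse(struct node *head)
{
	struct node *next, *tail = NULL;

	while (head) {
		next = head->next;
		head->next = tail;
		tail = head;
		head = next;
	}
	return tail;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *n = list_reverse(&a);	/* was 1->2->3, becomes 3->2->1 */

	for (; n; n = n->next)
		printf("%d ", n->val);
	printf("\n");
	return 0;
}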
@@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
750 } 787 }
751} 788}
752 789
790static void ghes_print_queued_estatus(void)
791{
792 struct llist_node *llnode;
793 struct ghes_estatus_node *estatus_node;
794 struct acpi_hest_generic *generic;
795 struct acpi_hest_generic_status *estatus;
796 u32 len, node_len;
797
798 llnode = llist_del_all(&ghes_estatus_llist);
799 /*
800 * Because the time order of estatus in list is reversed,
801 * revert it back to proper order.
802 */
803 llnode = llist_nodes_reverse(llnode);
804 while (llnode) {
805 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
806 llnode);
807 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
808 len = apei_estatus_len(estatus);
809 node_len = GHES_ESTATUS_NODE_LEN(len);
810 generic = estatus_node->generic;
811 ghes_print_estatus(NULL, generic, estatus);
812 llnode = llnode->next;
813 }
814}
815
753static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) 816static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
754{ 817{
755 struct ghes *ghes, *ghes_global = NULL; 818 struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
775 838
776 if (sev_global >= GHES_SEV_PANIC) { 839 if (sev_global >= GHES_SEV_PANIC) {
777 oops_begin(); 840 oops_begin();
778 __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic, 841 ghes_print_queued_estatus();
842 __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
779 ghes_global->estatus); 843 ghes_global->estatus);
780 /* reboot to log the error! */ 844 /* reboot to log the error! */
781 if (panic_timeout == 0) 845 if (panic_timeout == 0)
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index ee7fddc4665c..7f00cf38098f 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -221,10 +221,9 @@ void __init acpi_hest_init(void)
221 221
222 status = acpi_get_table(ACPI_SIG_HEST, 0, 222 status = acpi_get_table(ACPI_SIG_HEST, 0,
223 (struct acpi_table_header **)&hest_tab); 223 (struct acpi_table_header **)&hest_tab);
224 if (status == AE_NOT_FOUND) { 224 if (status == AE_NOT_FOUND)
225 pr_info(HEST_PFX "Table not found.\n");
226 goto err; 225 goto err;
227 } else if (ACPI_FAILURE(status)) { 226 else if (ACPI_FAILURE(status)) {
228 const char *msg = acpi_format_exception(status); 227 const char *msg = acpi_format_exception(status);
229 pr_err(HEST_PFX "Failed to get table, %s\n", msg); 228 pr_err(HEST_PFX "Failed to get table, %s\n", msg);
230 rc = -EINVAL; 229 rc = -EINVAL;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index cfc0cc10af39..d4a5b3d3657b 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -32,6 +32,8 @@
32#include <linux/rculist.h> 32#include <linux/rculist.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/mm.h>
36#include <linux/highmem.h>
35#include <acpi/atomicio.h> 37#include <acpi/atomicio.h>
36 38
37#define ACPI_PFX "ACPI: " 39#define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
97 return NULL; 99 return NULL;
98} 100}
99 101
102#ifndef CONFIG_IA64
103#define should_use_kmap(pfn) page_is_ram(pfn)
104#else
105/* ioremap will take care of cache attributes */
106#define should_use_kmap(pfn) 0
107#endif
108
109static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
110{
111 unsigned long pfn;
112
113 pfn = pg_off >> PAGE_SHIFT;
114 if (should_use_kmap(pfn)) {
115 if (pg_sz > PAGE_SIZE)
116 return NULL;
117 return (void __iomem __force *)kmap(pfn_to_page(pfn));
118 } else
119 return ioremap(pg_off, pg_sz);
120}
121
122static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
123{
124 unsigned long pfn;
125
126 pfn = pg_off >> PAGE_SHIFT;
127 if (page_is_ram(pfn))
128 kunmap(pfn_to_page(pfn));
129 else
130 iounmap(vaddr);
131}
132
100/* 133/*
101 * Used to pre-map the specified IO memory area. First try to find 134 * Used to pre-map the specified IO memory area. First try to find
102 * whether the area is already pre-mapped, if it is, increase the 135 * whether the area is already pre-mapped, if it is, increase the
@@ -119,7 +152,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
119 152
120 pg_off = paddr & PAGE_MASK; 153 pg_off = paddr & PAGE_MASK;
121 pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; 154 pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
122 vaddr = ioremap(pg_off, pg_sz); 155 vaddr = acpi_map(pg_off, pg_sz);
123 if (!vaddr) 156 if (!vaddr)
124 return NULL; 157 return NULL;
125 map = kmalloc(sizeof(*map), GFP_KERNEL); 158 map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
135 vaddr = __acpi_try_ioremap(paddr, size); 168 vaddr = __acpi_try_ioremap(paddr, size);
136 if (vaddr) { 169 if (vaddr) {
137 spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 170 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
138 iounmap(map->vaddr); 171 acpi_unmap(pg_off, map->vaddr);
139 kfree(map); 172 kfree(map);
140 return vaddr; 173 return vaddr;
141 } 174 }
@@ -144,7 +177,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
144 177
145 return map->vaddr + (paddr - map->paddr); 178 return map->vaddr + (paddr - map->paddr);
146err_unmap: 179err_unmap:
147 iounmap(vaddr); 180 acpi_unmap(pg_off, vaddr);
148 return NULL; 181 return NULL;
149} 182}
150 183
@@ -177,7 +210,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
177 return; 210 return;
178 211
179 synchronize_rcu(); 212 synchronize_rcu();
180 iounmap(map->vaddr); 213 acpi_unmap(map->paddr, map->vaddr);
181 kfree(map); 214 kfree(map);
182} 215}
183 216
@@ -260,6 +293,21 @@ int acpi_post_unmap_gar(struct acpi_generic_address *reg)
260} 293}
261EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); 294EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
262 295
296#ifdef readq
297static inline u64 read64(const volatile void __iomem *addr)
298{
299 return readq(addr);
300}
301#else
302static inline u64 read64(const volatile void __iomem *addr)
303{
304 u64 l, h;
305 l = readl(addr);
306 h = readl(addr+4);
307 return l | (h << 32);
308}
309#endif
310
263/* 311/*
264 * Can be used in atomic (including NMI) or process context. RCU read 312 * Can be used in atomic (including NMI) or process context. RCU read
265 * lock can only be released after the IO memory area accessing. 313 * lock can only be released after the IO memory area accessing.
@@ -280,11 +328,9 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
280 case 32: 328 case 32:
281 *val = readl(addr); 329 *val = readl(addr);
282 break; 330 break;
283#ifdef readq
284 case 64: 331 case 64:
285 *val = readq(addr); 332 *val = read64(addr);
286 break; 333 break;
287#endif
288 default: 334 default:
289 return -EINVAL; 335 return -EINVAL;
290 } 336 }
@@ -293,6 +339,19 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
293 return 0; 339 return 0;
294} 340}
295 341
342#ifdef writeq
343static inline void write64(u64 val, volatile void __iomem *addr)
344{
345 writeq(val, addr);
346}
347#else
348static inline void write64(u64 val, volatile void __iomem *addr)
349{
350 writel(val, addr);
351 writel(val>>32, addr+4);
352}
353#endif
354
296static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) 355static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
297{ 356{
298 void __iomem *addr; 357 void __iomem *addr;
@@ -309,11 +368,9 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
309 case 32: 368 case 32:
310 writel(val, addr); 369 writel(val, addr);
311 break; 370 break;
312#ifdef writeq
313 case 64: 371 case 64:
314 writeq(val, addr); 372 write64(val, addr);
315 break; 373 break;
316#endif
317 default: 374 default:
318 return -EINVAL; 375 return -EINVAL;
319 } 376 }
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 3b5c3189fd99..e56f3be7b07d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
45static int node_to_pxm_map[MAX_NUMNODES] 45static int node_to_pxm_map[MAX_NUMNODES]
46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
47 47
48unsigned char acpi_srat_revision __initdata;
49
48int pxm_to_node(int pxm) 50int pxm_to_node(int pxm)
49{ 51{
50 if (pxm < 0) 52 if (pxm < 0)
@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
255 257
256static int __init acpi_parse_srat(struct acpi_table_header *table) 258static int __init acpi_parse_srat(struct acpi_table_header *table)
257{ 259{
260 struct acpi_table_srat *srat;
258 if (!table) 261 if (!table)
259 return -EINVAL; 262 return -EINVAL;
260 263
264 srat = (struct acpi_table_srat *)table;
265 acpi_srat_revision = srat->header.revision;
266
261 /* Real work done in acpi_table_parse_srat below. */ 267 /* Real work done in acpi_table_parse_srat below. */
262 268
263 return 0; 269 return 0;
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 096787b43c96..7a2035fa8c71 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -15,6 +15,56 @@
15#include <linux/acpi_io.h> 15#include <linux/acpi_io.h>
16#include <acpi/acpiosxf.h> 16#include <acpi/acpiosxf.h>
17 17
18/* ACPI NVS regions, APEI may use it */
19
20struct nvs_region {
21 __u64 phys_start;
22 __u64 size;
23 struct list_head node;
24};
25
26static LIST_HEAD(nvs_region_list);
27
28#ifdef CONFIG_ACPI_SLEEP
29static int suspend_nvs_register(unsigned long start, unsigned long size);
30#else
31static inline int suspend_nvs_register(unsigned long a, unsigned long b)
32{
33 return 0;
34}
35#endif
36
37int acpi_nvs_register(__u64 start, __u64 size)
38{
39 struct nvs_region *region;
40
41 region = kmalloc(sizeof(*region), GFP_KERNEL);
42 if (!region)
43 return -ENOMEM;
44 region->phys_start = start;
45 region->size = size;
46 list_add_tail(&region->node, &nvs_region_list);
47
48 return suspend_nvs_register(start, size);
49}
50
51int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
52 void *data)
53{
54 int rc;
55 struct nvs_region *region;
56
57 list_for_each_entry(region, &nvs_region_list, node) {
58 rc = func(region->phys_start, region->size, data);
59 if (rc)
60 return rc;
61 }
62
63 return 0;
64}
65
66
67#ifdef CONFIG_ACPI_SLEEP
18/* 68/*
19 * Platforms, like ACPI, may want us to save some memory used by them during 69 * Platforms, like ACPI, may want us to save some memory used by them during
20 * suspend and to restore the contents of this memory during the subsequent 70 * suspend and to restore the contents of this memory during the subsequent
@@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list);
41 * things so that the data from page-aligned addresses in this region will 91 * things so that the data from page-aligned addresses in this region will
42 * be copied into separate RAM pages. 92 * be copied into separate RAM pages.
43 */ 93 */
44int suspend_nvs_register(unsigned long start, unsigned long size) 94static int suspend_nvs_register(unsigned long start, unsigned long size)
45{ 95{
46 struct nvs_page *entry, *next; 96 struct nvs_page *entry, *next;
47 97
@@ -159,3 +209,4 @@ void suspend_nvs_restore(void)
159 if (entry->data) 209 if (entry->data)
160 memcpy(entry->kaddr, entry->data, entry->size); 210 memcpy(entry->kaddr, entry->data, entry->size);
161} 211}
212#endif
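
acpi_nvs_register() and acpi_nvs_for_each_region(), added at the top of this file, let APEI walk the firmware's NVS regions without knowing how they are stored. A self-contained sketch of the same register/iterate pattern, with a small fixed-size array standing in for the kernel's list_head bookkeeping (the region addresses are made up):

#include <stdio.h>
#include <stdint.h>

struct nvs_region {
	uint64_t phys_start;
	uint64_t size;
};

static struct nvs_region regions[16];
static int nr_regions;

static int nvs_register(uint64_t start, uint64_t size)
{
	if (nr_regions >= 16)
		return -1;
	regions[nr_regions].phys_start = start;
	regions[nr_regions].size = size;
	nr_regions++;
	return 0;
}

static int nvs_for_each_region(int (*func)(uint64_t start, uint64_t size,
					   void *data), void *data)
{
	int i, rc;

	for (i = 0; i < nr_regions; i++) {
		rc = func(regions[i].phys_start, regions[i].size, data);
		if (rc)
			return rc;	/* stop on the first non-zero return */
	}
	return 0;
}

static int print_region(uint64_t start, uint64_t size, void *data)
{
	(void)data;
	printf("NVS region [%#llx, +%#llx)\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}

int main(void)
{
	nvs_register(0xb0000000, 0x10000);	/* hypothetical region */
	nvs_register(0xb0020000, 0x8000);	/* hypothetical region */
	return nvs_for_each_region(print_region, NULL);
}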
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f31c5c5f1b7e..fcc12d842bcc 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -83,19 +83,6 @@ static struct workqueue_struct *kacpi_notify_wq;
83struct workqueue_struct *kacpi_hotplug_wq; 83struct workqueue_struct *kacpi_hotplug_wq;
84EXPORT_SYMBOL(kacpi_hotplug_wq); 84EXPORT_SYMBOL(kacpi_hotplug_wq);
85 85
86struct acpi_res_list {
87 resource_size_t start;
88 resource_size_t end;
89 acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
90 char name[5]; /* only can have a length of 4 chars, make use of this
91 one instead of res->name, no need to kalloc then */
92 struct list_head resource_list;
93 int count;
94};
95
96static LIST_HEAD(resource_list_head);
97static DEFINE_SPINLOCK(acpi_res_lock);
98
99/* 86/*
100 * This list of permanent mappings is for memory that may be accessed from 87 * This list of permanent mappings is for memory that may be accessed from
101 * interrupt context, where we can't do the ioremap(). 88 * interrupt context, where we can't do the ioremap().
@@ -166,17 +153,21 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
166 return supported; 153 return supported;
167} 154}
168 155
169static void __init acpi_request_region (struct acpi_generic_address *addr, 156static void __init acpi_request_region (struct acpi_generic_address *gas,
170 unsigned int length, char *desc) 157 unsigned int length, char *desc)
171{ 158{
172 if (!addr->address || !length) 159 u64 addr;
160
161 /* Handle possible alignment issues */
162 memcpy(&addr, &gas->address, sizeof(addr));
163 if (!addr || !length)
173 return; 164 return;
174 165
175 /* Resources are never freed */ 166 /* Resources are never freed */
176 if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 167 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
177 request_region(addr->address, length, desc); 168 request_region(addr, length, desc);
178 else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 169 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
179 request_mem_region(addr->address, length, desc); 170 request_mem_region(addr, length, desc);
180} 171}
181 172
182static int __init acpi_reserve_resources(void) 173static int __init acpi_reserve_resources(void)
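
acpi_request_region() above, like acpi_os_map_generic_address()/acpi_os_unmap_generic_address() later in this diff, copies gas->address into a local u64 with memcpy() rather than dereferencing it directly, because struct acpi_generic_address is byte-packed and the 64-bit address field may be misaligned. A standalone sketch of the pattern (the packed layout below only mimics the real GAS structure, and the address value is made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Packed layout similar in spirit to struct acpi_generic_address:
 * the 64-bit address starts at offset 4 and is therefore misaligned. */
struct packed_gas {
	uint8_t  space_id;
	uint8_t  bit_width;
	uint8_t  bit_offset;
	uint8_t  access_width;
	uint64_t address;
} __attribute__((packed));

static uint64_t gas_address(const struct packed_gas *gas)
{
	uint64_t addr;

	/* Don't load gas->address in place: a direct 8-byte read may be
	 * misaligned on some architectures.  memcpy() is always safe. */
	memcpy(&addr, &gas->address, sizeof(addr));
	return addr;
}

int main(void)
{
	struct packed_gas gas = {
		.space_id = 0,			/* SystemMemory */
		.bit_width = 64,
		.address = 0xfed40000,		/* hypothetical MMIO base */
	};

	printf("%#llx\n", (unsigned long long)gas_address(&gas));
	return 0;
}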
@@ -427,35 +418,42 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
427 __acpi_unmap_table(virt, size); 418 __acpi_unmap_table(virt, size);
428} 419}
429 420
430static int acpi_os_map_generic_address(struct acpi_generic_address *addr) 421int acpi_os_map_generic_address(struct acpi_generic_address *gas)
431{ 422{
423 u64 addr;
432 void __iomem *virt; 424 void __iomem *virt;
433 425
434 if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 426 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
435 return 0; 427 return 0;
436 428
437 if (!addr->address || !addr->bit_width) 429 /* Handle possible alignment issues */
430 memcpy(&addr, &gas->address, sizeof(addr));
431 if (!addr || !gas->bit_width)
438 return -EINVAL; 432 return -EINVAL;
439 433
440 virt = acpi_os_map_memory(addr->address, addr->bit_width / 8); 434 virt = acpi_os_map_memory(addr, gas->bit_width / 8);
441 if (!virt) 435 if (!virt)
442 return -EIO; 436 return -EIO;
443 437
444 return 0; 438 return 0;
445} 439}
440EXPORT_SYMBOL(acpi_os_map_generic_address);
446 441
447static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr) 442void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
448{ 443{
444 u64 addr;
449 struct acpi_ioremap *map; 445 struct acpi_ioremap *map;
450 446
451 if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 447 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
452 return; 448 return;
453 449
454 if (!addr->address || !addr->bit_width) 450 /* Handle possible alignment issues */
451 memcpy(&addr, &gas->address, sizeof(addr));
452 if (!addr || !gas->bit_width)
455 return; 453 return;
456 454
457 mutex_lock(&acpi_ioremap_lock); 455 mutex_lock(&acpi_ioremap_lock);
458 map = acpi_map_lookup(addr->address, addr->bit_width / 8); 456 map = acpi_map_lookup(addr, gas->bit_width / 8);
459 if (!map) { 457 if (!map) {
460 mutex_unlock(&acpi_ioremap_lock); 458 mutex_unlock(&acpi_ioremap_lock);
461 return; 459 return;
@@ -465,6 +463,7 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
465 463
466 acpi_os_map_cleanup(map); 464 acpi_os_map_cleanup(map);
467} 465}
466EXPORT_SYMBOL(acpi_os_unmap_generic_address);
468 467
469#ifdef ACPI_FUTURE_USAGE 468#ifdef ACPI_FUTURE_USAGE
470acpi_status 469acpi_status
@@ -1278,44 +1277,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1278 * drivers */ 1277 * drivers */
1279int acpi_check_resource_conflict(const struct resource *res) 1278int acpi_check_resource_conflict(const struct resource *res)
1280{ 1279{
1281 struct acpi_res_list *res_list_elem; 1280 acpi_adr_space_type space_id;
1282 int ioport = 0, clash = 0; 1281 acpi_size length;
1282 u8 warn = 0;
1283 int clash = 0;
1283 1284
1284 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1285 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1285 return 0; 1286 return 0;
1286 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) 1287 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1287 return 0; 1288 return 0;
1288 1289
1289 ioport = res->flags & IORESOURCE_IO; 1290 if (res->flags & IORESOURCE_IO)
1290 1291 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1291 spin_lock(&acpi_res_lock); 1292 else
1292 list_for_each_entry(res_list_elem, &resource_list_head, 1293 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1293 resource_list) {
1294 if (ioport && (res_list_elem->resource_type
1295 != ACPI_ADR_SPACE_SYSTEM_IO))
1296 continue;
1297 if (!ioport && (res_list_elem->resource_type
1298 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1299 continue;
1300 1294
1301 if (res->end < res_list_elem->start 1295 length = res->end - res->start + 1;
1302 || res_list_elem->end < res->start) 1296 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
1303 continue; 1297 warn = 1;
1304 clash = 1; 1298 clash = acpi_check_address_range(space_id, res->start, length, warn);
1305 break;
1306 }
1307 spin_unlock(&acpi_res_lock);
1308 1299
1309 if (clash) { 1300 if (clash) {
1310 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1301 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1311 printk(KERN_WARNING "ACPI: resource %s %pR"
1312 " conflicts with ACPI region %s "
1313 "[%s 0x%zx-0x%zx]\n",
1314 res->name, res, res_list_elem->name,
1315 (res_list_elem->resource_type ==
1316 ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
1317 (size_t) res_list_elem->start,
1318 (size_t) res_list_elem->end);
1319 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1302 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1320 printk(KERN_NOTICE "ACPI: This conflict may" 1303 printk(KERN_NOTICE "ACPI: This conflict may"
1321 " cause random problems and system" 1304 " cause random problems and system"
@@ -1467,155 +1450,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1467 kmem_cache_free(cache, object); 1450 kmem_cache_free(cache, object);
1468 return (AE_OK); 1451 return (AE_OK);
1469} 1452}
1470
1471static inline int acpi_res_list_add(struct acpi_res_list *res)
1472{
1473 struct acpi_res_list *res_list_elem;
1474
1475 list_for_each_entry(res_list_elem, &resource_list_head,
1476 resource_list) {
1477
1478 if (res->resource_type == res_list_elem->resource_type &&
1479 res->start == res_list_elem->start &&
1480 res->end == res_list_elem->end) {
1481
1482 /*
1483 * The Region(addr,len) already exist in the list,
1484 * just increase the count
1485 */
1486
1487 res_list_elem->count++;
1488 return 0;
1489 }
1490 }
1491
1492 res->count = 1;
1493 list_add(&res->resource_list, &resource_list_head);
1494 return 1;
1495}
1496
1497static inline void acpi_res_list_del(struct acpi_res_list *res)
1498{
1499 struct acpi_res_list *res_list_elem;
1500
1501 list_for_each_entry(res_list_elem, &resource_list_head,
1502 resource_list) {
1503
1504 if (res->resource_type == res_list_elem->resource_type &&
1505 res->start == res_list_elem->start &&
1506 res->end == res_list_elem->end) {
1507
1508 /*
1509 * If the res count is decreased to 0,
1510 * remove and free it
1511 */
1512
1513 if (--res_list_elem->count == 0) {
1514 list_del(&res_list_elem->resource_list);
1515 kfree(res_list_elem);
1516 }
1517 return;
1518 }
1519 }
1520}
1521
1522acpi_status
1523acpi_os_invalidate_address(
1524 u8 space_id,
1525 acpi_physical_address address,
1526 acpi_size length)
1527{
1528 struct acpi_res_list res;
1529
1530 switch (space_id) {
1531 case ACPI_ADR_SPACE_SYSTEM_IO:
1532 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1533 /* Only interference checks against SystemIO and SystemMemory
1534 are needed */
1535 res.start = address;
1536 res.end = address + length - 1;
1537 res.resource_type = space_id;
1538 spin_lock(&acpi_res_lock);
1539 acpi_res_list_del(&res);
1540 spin_unlock(&acpi_res_lock);
1541 break;
1542 case ACPI_ADR_SPACE_PCI_CONFIG:
1543 case ACPI_ADR_SPACE_EC:
1544 case ACPI_ADR_SPACE_SMBUS:
1545 case ACPI_ADR_SPACE_CMOS:
1546 case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1547 case ACPI_ADR_SPACE_DATA_TABLE:
1548 case ACPI_ADR_SPACE_FIXED_HARDWARE:
1549 break;
1550 }
1551 return AE_OK;
1552}
1553
1554/******************************************************************************
1555 *
1556 * FUNCTION: acpi_os_validate_address
1557 *
1558 * PARAMETERS: space_id - ACPI space ID
1559 * address - Physical address
1560 * length - Address length
1561 *
1562 * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
1563 * should return AE_AML_ILLEGAL_ADDRESS.
1564 *
1565 * DESCRIPTION: Validate a system address via the host OS. Used to validate
1566 * the addresses accessed by AML operation regions.
1567 *
1568 *****************************************************************************/
1569
1570acpi_status
1571acpi_os_validate_address (
1572 u8 space_id,
1573 acpi_physical_address address,
1574 acpi_size length,
1575 char *name)
1576{
1577 struct acpi_res_list *res;
1578 int added;
1579 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1580 return AE_OK;
1581
1582 switch (space_id) {
1583 case ACPI_ADR_SPACE_SYSTEM_IO:
1584 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1585 /* Only interference checks against SystemIO and SystemMemory
1586 are needed */
1587 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1588 if (!res)
1589 return AE_OK;
1590 /* ACPI names are fixed to 4 bytes, still better use strlcpy */
1591 strlcpy(res->name, name, 5);
1592 res->start = address;
1593 res->end = address + length - 1;
1594 res->resource_type = space_id;
1595 spin_lock(&acpi_res_lock);
1596 added = acpi_res_list_add(res);
1597 spin_unlock(&acpi_res_lock);
1598 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1599 "name: %s\n", added ? "Added" : "Already exist",
1600 (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1601 ? "SystemIO" : "System Memory",
1602 (unsigned long long)res->start,
1603 (unsigned long long)res->end,
1604 res->name);
1605 if (!added)
1606 kfree(res);
1607 break;
1608 case ACPI_ADR_SPACE_PCI_CONFIG:
1609 case ACPI_ADR_SPACE_EC:
1610 case ACPI_ADR_SPACE_SMBUS:
1611 case ACPI_ADR_SPACE_CMOS:
1612 case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1613 case ACPI_ADR_SPACE_DATA_TABLE:
1614 case ACPI_ADR_SPACE_FIXED_HARDWARE:
1615 break;
1616 }
1617 return AE_OK;
1618}
1619#endif 1453#endif
1620 1454
1621acpi_status __init acpi_os_initialize(void) 1455acpi_status __init acpi_os_initialize(void)
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 3a0428e8435c..c850de4c9a14 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
173 apic_id = map_mat_entry(handle, type, acpi_id); 173 apic_id = map_mat_entry(handle, type, acpi_id);
174 if (apic_id == -1) 174 if (apic_id == -1)
175 apic_id = map_madt_entry(type, acpi_id); 175 apic_id = map_madt_entry(type, acpi_id);
176 if (apic_id == -1) 176 if (apic_id == -1) {
177 return apic_id; 177 /*
178 * On UP processor, there is no _MAT or MADT table.
179 * So above apic_id is always set to -1.
180 *
181 * BIOS may define multiple CPU handles even for UP processor.
182 * For example,
183 *
184 * Scope (_PR)
185 * {
186 * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
187 * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
188 * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
189 * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
190 * }
191 *
192 * Ignores apic_id and always return 0 for CPU0's handle.
193 * Return -1 for other CPU's handle.
194 */
195 if (acpi_id == 0)
196 return acpi_id;
197 else
198 return apic_id;
199 }
178 200
179#ifdef CONFIG_SMP 201#ifdef CONFIG_SMP
180 for_each_possible_cpu(i) { 202 for_each_possible_cpu(i) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 20a68ca386de..0034ede38710 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL");
82static int acpi_processor_add(struct acpi_device *device); 82static int acpi_processor_add(struct acpi_device *device);
83static int acpi_processor_remove(struct acpi_device *device, int type); 83static int acpi_processor_remove(struct acpi_device *device, int type);
84static void acpi_processor_notify(struct acpi_device *device, u32 event); 84static void acpi_processor_notify(struct acpi_device *device, u32 event);
85static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); 85static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
86static int acpi_processor_handle_eject(struct acpi_processor *pr); 86static int acpi_processor_handle_eject(struct acpi_processor *pr);
87 87
88 88
@@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
324 * they are physically not present. 324 * they are physically not present.
325 */ 325 */
326 if (pr->id == -1) { 326 if (pr->id == -1) {
327 if (ACPI_FAILURE 327 if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
328 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
329 return -ENODEV; 328 return -ENODEV;
330 }
331 } 329 }
332 /* 330 /*
333 * On some boxes several processors use the same processor bus id. 331 * On some boxes several processors use the same processor bus id.
@@ -539,6 +537,7 @@ err_thermal_unregister:
539 thermal_cooling_device_unregister(pr->cdev); 537 thermal_cooling_device_unregister(pr->cdev);
540err_power_exit: 538err_power_exit:
541 acpi_processor_power_exit(pr, device); 539 acpi_processor_power_exit(pr, device);
540 sysfs_remove_link(&device->dev.kobj, "sysdev");
542err_free_cpumask: 541err_free_cpumask:
543 free_cpumask_var(pr->throttling.shared_cpu_map); 542 free_cpumask_var(pr->throttling.shared_cpu_map);
544 543
@@ -720,18 +719,19 @@ processor_walk_namespace_cb(acpi_handle handle,
720 return (AE_OK); 719 return (AE_OK);
721} 720}
722 721
723static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 722static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
724{ 723{
724 acpi_handle handle = pr->handle;
725 725
726 if (!is_processor_present(handle)) { 726 if (!is_processor_present(handle)) {
727 return AE_ERROR; 727 return AE_ERROR;
728 } 728 }
729 729
730 if (acpi_map_lsapic(handle, p_cpu)) 730 if (acpi_map_lsapic(handle, &pr->id))
731 return AE_ERROR; 731 return AE_ERROR;
732 732
733 if (arch_register_cpu(*p_cpu)) { 733 if (arch_register_cpu(pr->id)) {
734 acpi_unmap_lsapic(*p_cpu); 734 acpi_unmap_lsapic(pr->id);
735 return AE_ERROR; 735 return AE_ERROR;
736 } 736 }
737 737
@@ -748,7 +748,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
748 return (0); 748 return (0);
749} 749}
750#else 750#else
751static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 751static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
752{ 752{
753 return AE_ERROR; 753 return AE_ERROR;
754} 754}
@@ -827,8 +827,6 @@ static void __exit acpi_processor_exit(void)
827 827
828 acpi_bus_unregister_driver(&acpi_processor_driver); 828 acpi_bus_unregister_driver(&acpi_processor_driver);
829 829
830 cpuidle_unregister_driver(&acpi_idle_driver);
831
832 return; 830 return;
833} 831}
834 832
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 99dc5921e1dd..40fb12288ce2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -915,9 +915,10 @@ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
915 915
916/** 916/**
917 * __bus_register - register a driver-core subsystem 917 * __bus_register - register a driver-core subsystem
918 * @bus: bus. 918 * @bus: bus to register
919 * @key: lockdep class key
919 * 920 *
920 * Once we have that, we registered the bus with the kobject 921 * Once we have that, we register the bus with the kobject
921 * infrastructure, then register the children subsystems it has: 922 * infrastructure, then register the children subsystems it has:
922 * the devices and drivers that belong to the subsystem. 923 * the devices and drivers that belong to the subsystem.
923 */ 924 */
@@ -1220,8 +1221,8 @@ static void system_root_device_release(struct device *dev)
1220} 1221}
1221/** 1222/**
1222 * subsys_system_register - register a subsystem at /sys/devices/system/ 1223 * subsys_system_register - register a subsystem at /sys/devices/system/
1223 * @subsys - system subsystem 1224 * @subsys: system subsystem
1224 * @groups - default attributes for the root device 1225 * @groups: default attributes for the root device
1225 * 1226 *
1226 * All 'system' subsystems have a /sys/devices/system/<name> root device 1227 * All 'system' subsystems have a /sys/devices/system/<name> root device
1227 * with the name of the subsystem. The root device can carry subsystem- 1228 * with the name of the subsystem. The root device can carry subsystem-
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 26ab358dac62..6c9387d646ec 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -525,8 +525,7 @@ static int _request_firmware(const struct firmware **firmware_p,
525 if (!firmware) { 525 if (!firmware) {
526 dev_err(device, "%s: kmalloc(struct firmware) failed\n", 526 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
527 __func__); 527 __func__);
528 retval = -ENOMEM; 528 return -ENOMEM;
529 goto out;
530 } 529 }
531 530
532 if (fw_get_builtin_firmware(firmware, name)) { 531 if (fw_get_builtin_firmware(firmware, name)) {
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a30aa103f95b..4e4c8a4a5fd3 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -317,6 +317,17 @@ config BLK_DEV_NBD
317 317
318 If unsure, say N. 318 If unsure, say N.
319 319
320config BLK_DEV_NVME
321 tristate "NVM Express block device"
322 depends on PCI
323 ---help---
324 The NVM Express driver is for solid state drives directly
325 connected to the PCI or PCI Express bus. If you know you
326 don't have one of these, it is safe to answer N.
327
328 To compile this driver as a module, choose M here: the
329 module will be called nvme.
330
320config BLK_DEV_OSD 331config BLK_DEV_OSD
321 tristate "OSD object-as-blkdev support" 332 tristate "OSD object-as-blkdev support"
322 depends on SCSI_OSD_ULD 333 depends on SCSI_OSD_ULD
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ad7b74a44ef3..5b795059f8fb 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
23obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o 23obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
24obj-$(CONFIG_MG_DISK) += mg_disk.o 24obj-$(CONFIG_MG_DISK) += mg_disk.o
25obj-$(CONFIG_SUNVDC) += sunvdc.o 25obj-$(CONFIG_SUNVDC) += sunvdc.o
26obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
26obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o 27obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
27 28
28obj-$(CONFIG_BLK_DEV_UMEM) += umem.o 29obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644
index 000000000000..c1dc4d86c221
--- /dev/null
+++ b/drivers/block/nvme.c
@@ -0,0 +1,1739 @@
1/*
2 * NVM Express device driver
3 * Copyright (c) 2011, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/nvme.h>
20#include <linux/bio.h>
21#include <linux/bitops.h>
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/errno.h>
25#include <linux/fs.h>
26#include <linux/genhd.h>
27#include <linux/idr.h>
28#include <linux/init.h>
29#include <linux/interrupt.h>
30#include <linux/io.h>
31#include <linux/kdev_t.h>
32#include <linux/kthread.h>
33#include <linux/kernel.h>
34#include <linux/mm.h>
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/pci.h>
38#include <linux/poison.h>
39#include <linux/sched.h>
40#include <linux/slab.h>
41#include <linux/types.h>
42#include <linux/version.h>
43
44#define NVME_Q_DEPTH 1024
45#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
46#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
47#define NVME_MINORS 64
48#define NVME_IO_TIMEOUT (5 * HZ)
49#define ADMIN_TIMEOUT (60 * HZ)
50
51static int nvme_major;
52module_param(nvme_major, int, 0);
53
54static int use_threaded_interrupts;
55module_param(use_threaded_interrupts, int, 0);
56
57static DEFINE_SPINLOCK(dev_list_lock);
58static LIST_HEAD(dev_list);
59static struct task_struct *nvme_thread;
60
61/*
62 * Represents an NVM Express device. Each nvme_dev is a PCI function.
63 */
64struct nvme_dev {
65 struct list_head node;
66 struct nvme_queue **queues;
67 u32 __iomem *dbs;
68 struct pci_dev *pci_dev;
69 struct dma_pool *prp_page_pool;
70 struct dma_pool *prp_small_pool;
71 int instance;
72 int queue_count;
73 int db_stride;
74 u32 ctrl_config;
75 struct msix_entry *entry;
76 struct nvme_bar __iomem *bar;
77 struct list_head namespaces;
78 char serial[20];
79 char model[40];
80 char firmware_rev[8];
81};
82
83/*
84 * An NVM Express namespace is equivalent to a SCSI LUN
85 */
86struct nvme_ns {
87 struct list_head list;
88
89 struct nvme_dev *dev;
90 struct request_queue *queue;
91 struct gendisk *disk;
92
93 int ns_id;
94 int lba_shift;
95};
96
97/*
98 * An NVM Express queue. Each device has at least two (one for admin
99 * commands and one for I/O commands).
100 */
101struct nvme_queue {
102 struct device *q_dmadev;
103 struct nvme_dev *dev;
104 spinlock_t q_lock;
105 struct nvme_command *sq_cmds;
106 volatile struct nvme_completion *cqes;
107 dma_addr_t sq_dma_addr;
108 dma_addr_t cq_dma_addr;
109 wait_queue_head_t sq_full;
110 wait_queue_t sq_cong_wait;
111 struct bio_list sq_cong;
112 u32 __iomem *q_db;
113 u16 q_depth;
114 u16 cq_vector;
115 u16 sq_head;
116 u16 sq_tail;
117 u16 cq_head;
118 u16 cq_phase;
119 unsigned long cmdid_data[];
120};
121
122/*
 123 * Check we didn't inadvertently grow the command struct
124 */
125static inline void _nvme_check_size(void)
126{
127 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
128 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
129 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
130 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
131 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
132 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
133 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
134 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
135 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
136}
137
138typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
139 struct nvme_completion *);
140
141struct nvme_cmd_info {
142 nvme_completion_fn fn;
143 void *ctx;
144 unsigned long timeout;
145};
146
147static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
148{
149 return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
150}
151
152/**
153 * alloc_cmdid() - Allocate a Command ID
154 * @nvmeq: The queue that will be used for this command
155 * @ctx: A pointer that will be passed to the handler
156 * @handler: The function to call on completion
157 *
158 * Allocate a Command ID for a queue. The data passed in will
159 * be passed to the completion handler. This is implemented by using
160 * the bottom two bits of the ctx pointer to store the handler ID.
161 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
162 * We can change this if it becomes a problem.
163 *
164 * May be called with local interrupts disabled and the q_lock held,
165 * or with interrupts enabled and no locks held.
166 */
167static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
168 nvme_completion_fn handler, unsigned timeout)
169{
170 int depth = nvmeq->q_depth - 1;
171 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
172 int cmdid;
173
174 do {
175 cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
176 if (cmdid >= depth)
177 return -EBUSY;
178 } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
179
180 info[cmdid].fn = handler;
181 info[cmdid].ctx = ctx;
182 info[cmdid].timeout = jiffies + timeout;
183 return cmdid;
184}
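
A self-contained sketch of the allocation scheme above (illustrative only; the names and the single-threaded bit helpers are stand-ins for the kernel's atomic find_first_zero_bit()/test_and_set_bit()): scan the bitmap for a free slot, claim it, and retry if another caller raced in between.

#include <stdio.h>

#define DEPTH 8                 /* stands in for nvmeq->q_depth */
static unsigned long bitmap;    /* stands in for nvmeq->cmdid_data */

static int find_first_zero(void)
{
        int i;

        for (i = 0; i < DEPTH; i++)
                if (!(bitmap & (1UL << i)))
                        return i;
        return DEPTH;
}

static int test_and_set(int bit)
{
        int was_set = !!(bitmap & (1UL << bit));

        bitmap |= 1UL << bit;
        return was_set;
}

/* Same shape as alloc_cmdid(): scan, claim, retry on a lost race. */
static int alloc_id(void)
{
        int id;

        do {
                id = find_first_zero();
                if (id >= DEPTH - 1)    /* alloc_cmdid() keeps one slot spare */
                        return -1;
        } while (test_and_set(id));
        return id;
}

int main(void)
{
        int i;

        for (i = 0; i < DEPTH; i++)
                printf("alloc_id() = %d\n", alloc_id());
        return 0;
}
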
185
186static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
187 nvme_completion_fn handler, unsigned timeout)
188{
189 int cmdid;
190 wait_event_killable(nvmeq->sq_full,
191 (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
192 return (cmdid < 0) ? -EINTR : cmdid;
193}
194
195/* Special values must be less than 0x1000 */
196#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
197#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
198#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
199#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
200#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
201
202static void special_completion(struct nvme_dev *dev, void *ctx,
203 struct nvme_completion *cqe)
204{
205 if (ctx == CMD_CTX_CANCELLED)
206 return;
207 if (ctx == CMD_CTX_FLUSH)
208 return;
209 if (ctx == CMD_CTX_COMPLETED) {
210 dev_warn(&dev->pci_dev->dev,
211 "completed id %d twice on queue %d\n",
212 cqe->command_id, le16_to_cpup(&cqe->sq_id));
213 return;
214 }
215 if (ctx == CMD_CTX_INVALID) {
216 dev_warn(&dev->pci_dev->dev,
217 "invalid id %d completed on queue %d\n",
218 cqe->command_id, le16_to_cpup(&cqe->sq_id));
219 return;
220 }
221
222 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
223}
224
225/*
226 * Called with local interrupts disabled and the q_lock held. May not sleep.
227 */
228static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
229 nvme_completion_fn *fn)
230{
231 void *ctx;
232 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
233
234 if (cmdid >= nvmeq->q_depth) {
235 *fn = special_completion;
236 return CMD_CTX_INVALID;
237 }
238 *fn = info[cmdid].fn;
239 ctx = info[cmdid].ctx;
240 info[cmdid].fn = special_completion;
241 info[cmdid].ctx = CMD_CTX_COMPLETED;
242 clear_bit(cmdid, nvmeq->cmdid_data);
243 wake_up(&nvmeq->sq_full);
244 return ctx;
245}
246
247static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
248 nvme_completion_fn *fn)
249{
250 void *ctx;
251 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
252 if (fn)
253 *fn = info[cmdid].fn;
254 ctx = info[cmdid].ctx;
255 info[cmdid].fn = special_completion;
256 info[cmdid].ctx = CMD_CTX_CANCELLED;
257 return ctx;
258}
259
260static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
261{
262 return dev->queues[get_cpu() + 1];
263}
264
265static void put_nvmeq(struct nvme_queue *nvmeq)
266{
267 put_cpu();
268}
269
270/**
271 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
272 * @nvmeq: The queue to use
273 * @cmd: The command to send
274 *
275 * Safe to use from interrupt context
276 */
277static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
278{
279 unsigned long flags;
280 u16 tail;
281 spin_lock_irqsave(&nvmeq->q_lock, flags);
282 tail = nvmeq->sq_tail;
283 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
284 if (++tail == nvmeq->q_depth)
285 tail = 0;
286 writel(tail, nvmeq->q_db);
287 nvmeq->sq_tail = tail;
288 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
289
290 return 0;
291}
292
293/*
294 * The nvme_iod describes the data in an I/O, including the list of PRP
295 * entries. You can't see it in this data structure because C doesn't let
296 * me express that. Use nvme_alloc_iod to ensure there's enough space
297 * allocated to store the PRP list.
298 */
299struct nvme_iod {
300 void *private; /* For the use of the submitter of the I/O */
301 int npages; /* In the PRP list. 0 means small pool in use */
302 int offset; /* Of PRP list */
303 int nents; /* Used in scatterlist */
304 int length; /* Of data, in bytes */
305 dma_addr_t first_dma;
306 struct scatterlist sg[0];
307};
308
309static __le64 **iod_list(struct nvme_iod *iod)
310{
311 return ((void *)iod) + iod->offset;
312}
313
314/*
315 * Will slightly overestimate the number of pages needed. This is OK
316 * as it only leads to a small amount of wasted memory for the lifetime of
317 * the I/O.
318 */
319static int nvme_npages(unsigned size)
320{
321 unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
322 return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
323}
324
325static struct nvme_iod *
326nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
327{
328 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
329 sizeof(__le64 *) * nvme_npages(nbytes) +
330 sizeof(struct scatterlist) * nseg, gfp);
331
332 if (iod) {
333 iod->offset = offsetof(struct nvme_iod, sg[nseg]);
334 iod->npages = -1;
335 iod->length = nbytes;
336 }
337
338 return iod;
339}
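
A stripped-down model of the single-allocation layout used here (illustrative only; the struct names are hypothetical): the scatterlist occupies the flexible array member, the PRP-list pointer array is placed behind it, and the stored byte offset is how iod_list() finds that array again.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct seg { void *page; unsigned int len; };   /* stand-in for scatterlist */

struct iod {
        int offset;             /* byte offset of the pointer array */
        int nseg;
        struct seg sg[];        /* flexible array member, like nvme_iod */
};

static uint64_t **iod_list(struct iod *iod)
{
        return (uint64_t **)((char *)iod + iod->offset);
}

static struct iod *alloc_iod(int nseg, int nlists)
{
        struct iod *iod = malloc(sizeof(*iod) +
                                 sizeof(struct seg) * nseg +
                                 sizeof(uint64_t *) * nlists);

        if (iod) {
                /* the driver computes this with offsetof(..., sg[nseg]) */
                iod->offset = (int)((char *)&iod->sg[nseg] - (char *)iod);
                iod->nseg = nseg;
        }
        return iod;
}

int main(void)
{
        struct iod *iod = alloc_iod(4, 2);

        if (!iod)
                return 1;
        iod_list(iod)[0] = NULL;        /* PRP-list slots live past sg[4] */
        printf("pointer array starts at byte offset %d\n", iod->offset);
        free(iod);
        return 0;
}
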
340
341static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
342{
343 const int last_prp = PAGE_SIZE / 8 - 1;
344 int i;
345 __le64 **list = iod_list(iod);
346 dma_addr_t prp_dma = iod->first_dma;
347
348 if (iod->npages == 0)
349 dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
350 for (i = 0; i < iod->npages; i++) {
351 __le64 *prp_list = list[i];
352 dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
353 dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
354 prp_dma = next_prp_dma;
355 }
356 kfree(iod);
357}
358
359static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
360{
361 struct nvme_queue *nvmeq = get_nvmeq(dev);
362 if (bio_list_empty(&nvmeq->sq_cong))
363 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
364 bio_list_add(&nvmeq->sq_cong, bio);
365 put_nvmeq(nvmeq);
366 wake_up_process(nvme_thread);
367}
368
369static void bio_completion(struct nvme_dev *dev, void *ctx,
370 struct nvme_completion *cqe)
371{
372 struct nvme_iod *iod = ctx;
373 struct bio *bio = iod->private;
374 u16 status = le16_to_cpup(&cqe->status) >> 1;
375
376 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
377 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
378 nvme_free_iod(dev, iod);
379 if (status) {
380 bio_endio(bio, -EIO);
381 } else if (bio->bi_vcnt > bio->bi_idx) {
382 requeue_bio(dev, bio);
383 } else {
384 bio_endio(bio, 0);
385 }
386}
387
388/* length is in bytes. gfp flags indicates whether we may sleep. */
389static int nvme_setup_prps(struct nvme_dev *dev,
390 struct nvme_common_command *cmd, struct nvme_iod *iod,
391 int total_len, gfp_t gfp)
392{
393 struct dma_pool *pool;
394 int length = total_len;
395 struct scatterlist *sg = iod->sg;
396 int dma_len = sg_dma_len(sg);
397 u64 dma_addr = sg_dma_address(sg);
398 int offset = offset_in_page(dma_addr);
399 __le64 *prp_list;
400 __le64 **list = iod_list(iod);
401 dma_addr_t prp_dma;
402 int nprps, i;
403
404 cmd->prp1 = cpu_to_le64(dma_addr);
405 length -= (PAGE_SIZE - offset);
406 if (length <= 0)
407 return total_len;
408
409 dma_len -= (PAGE_SIZE - offset);
410 if (dma_len) {
411 dma_addr += (PAGE_SIZE - offset);
412 } else {
413 sg = sg_next(sg);
414 dma_addr = sg_dma_address(sg);
415 dma_len = sg_dma_len(sg);
416 }
417
418 if (length <= PAGE_SIZE) {
419 cmd->prp2 = cpu_to_le64(dma_addr);
420 return total_len;
421 }
422
423 nprps = DIV_ROUND_UP(length, PAGE_SIZE);
424 if (nprps <= (256 / 8)) {
425 pool = dev->prp_small_pool;
426 iod->npages = 0;
427 } else {
428 pool = dev->prp_page_pool;
429 iod->npages = 1;
430 }
431
432 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
433 if (!prp_list) {
434 cmd->prp2 = cpu_to_le64(dma_addr);
435 iod->npages = -1;
436 return (total_len - length) + PAGE_SIZE;
437 }
438 list[0] = prp_list;
439 iod->first_dma = prp_dma;
440 cmd->prp2 = cpu_to_le64(prp_dma);
441 i = 0;
442 for (;;) {
443 if (i == PAGE_SIZE / 8) {
444 __le64 *old_prp_list = prp_list;
445 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
446 if (!prp_list)
447 return total_len - length;
448 list[iod->npages++] = prp_list;
449 prp_list[0] = old_prp_list[i - 1];
450 old_prp_list[i - 1] = cpu_to_le64(prp_dma);
451 i = 1;
452 }
453 prp_list[i++] = cpu_to_le64(dma_addr);
454 dma_len -= PAGE_SIZE;
455 dma_addr += PAGE_SIZE;
456 length -= PAGE_SIZE;
457 if (length <= 0)
458 break;
459 if (dma_len > 0)
460 continue;
461 BUG_ON(dma_len < 0);
462 sg = sg_next(sg);
463 dma_addr = sg_dma_address(sg);
464 dma_len = sg_dma_len(sg);
465 }
466
467 return total_len;
468}
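
The pool choice above follows directly from PRP arithmetic: the first (possibly partial) page rides in PRP1, a transfer that then fits in one more page uses PRP2 directly, and anything larger needs a list with one 8-byte entry per remaining page. A hedged sketch of that accounting for 4 KiB pages (the helper name is hypothetical):

#include <stdio.h>

#define PAGE_SZ 4096

/* Entries that must go into a PRP list for a transfer of 'len' bytes
 * starting at 'offset' within its first page; 0 means PRP1 (and possibly
 * PRP2) are enough on their own. */
static int prp_list_entries(unsigned int offset, unsigned int len)
{
        unsigned int remaining;

        if (len <= PAGE_SZ - offset)
                return 0;                       /* PRP1 covers it */
        remaining = len - (PAGE_SZ - offset);
        if (remaining <= PAGE_SZ)
                return 0;                       /* PRP2 points at data */
        return (remaining + PAGE_SZ - 1) / PAGE_SZ;
}

int main(void)
{
        unsigned int sizes[] = { 4096, 8192, 65536, 131072 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int n = prp_list_entries(0, sizes[i]);

                printf("%6u bytes -> %2d list entries, %s pool\n",
                       sizes[i], n,
                       n == 0 ? "no" : (n <= 256 / 8 ? "small" : "page"));
        }
        return 0;
}
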
469
470/* NVMe scatterlists require no holes in the virtual address */
471#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
472 (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
473
474static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
475 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
476{
477 struct bio_vec *bvec, *bvprv = NULL;
478 struct scatterlist *sg = NULL;
479 int i, old_idx, length = 0, nsegs = 0;
480
481 sg_init_table(iod->sg, psegs);
482 old_idx = bio->bi_idx;
483 bio_for_each_segment(bvec, bio, i) {
484 if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
485 sg->length += bvec->bv_len;
486 } else {
487 if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
488 break;
489 sg = sg ? sg + 1 : iod->sg;
490 sg_set_page(sg, bvec->bv_page, bvec->bv_len,
491 bvec->bv_offset);
492 nsegs++;
493 }
494 length += bvec->bv_len;
495 bvprv = bvec;
496 }
497 bio->bi_idx = i;
498 iod->nents = nsegs;
499 sg_mark_end(sg);
500 if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
501 bio->bi_idx = old_idx;
502 return -ENOMEM;
503 }
504 return length;
505}
506
507static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
508 int cmdid)
509{
510 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
511
512 memset(cmnd, 0, sizeof(*cmnd));
513 cmnd->common.opcode = nvme_cmd_flush;
514 cmnd->common.command_id = cmdid;
515 cmnd->common.nsid = cpu_to_le32(ns->ns_id);
516
517 if (++nvmeq->sq_tail == nvmeq->q_depth)
518 nvmeq->sq_tail = 0;
519 writel(nvmeq->sq_tail, nvmeq->q_db);
520
521 return 0;
522}
523
524static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
525{
526 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
527 special_completion, NVME_IO_TIMEOUT);
528 if (unlikely(cmdid < 0))
529 return cmdid;
530
531 return nvme_submit_flush(nvmeq, ns, cmdid);
532}
533
534/*
535 * Called with local interrupts disabled and the q_lock held. May not sleep.
536 */
537static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
538 struct bio *bio)
539{
540 struct nvme_command *cmnd;
541 struct nvme_iod *iod;
542 enum dma_data_direction dma_dir;
543 int cmdid, length, result = -ENOMEM;
544 u16 control;
545 u32 dsmgmt;
546 int psegs = bio_phys_segments(ns->queue, bio);
547
548 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
549 result = nvme_submit_flush_data(nvmeq, ns);
550 if (result)
551 return result;
552 }
553
554 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
555 if (!iod)
556 goto nomem;
557 iod->private = bio;
558
559 result = -EBUSY;
560 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
561 if (unlikely(cmdid < 0))
562 goto free_iod;
563
564 if ((bio->bi_rw & REQ_FLUSH) && !psegs)
565 return nvme_submit_flush(nvmeq, ns, cmdid);
566
567 control = 0;
568 if (bio->bi_rw & REQ_FUA)
569 control |= NVME_RW_FUA;
570 if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
571 control |= NVME_RW_LR;
572
573 dsmgmt = 0;
574 if (bio->bi_rw & REQ_RAHEAD)
575 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
576
577 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
578
579 memset(cmnd, 0, sizeof(*cmnd));
580 if (bio_data_dir(bio)) {
581 cmnd->rw.opcode = nvme_cmd_write;
582 dma_dir = DMA_TO_DEVICE;
583 } else {
584 cmnd->rw.opcode = nvme_cmd_read;
585 dma_dir = DMA_FROM_DEVICE;
586 }
587
588 result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
589 if (result < 0)
590 goto free_iod;
591 length = result;
592
593 cmnd->rw.command_id = cmdid;
594 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
595 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
596 GFP_ATOMIC);
597 cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
598 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
599 cmnd->rw.control = cpu_to_le16(control);
600 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
601
602 bio->bi_sector += length >> 9;
603
604 if (++nvmeq->sq_tail == nvmeq->q_depth)
605 nvmeq->sq_tail = 0;
606 writel(nvmeq->sq_tail, nvmeq->q_db);
607
608 return 0;
609
610 free_iod:
611 nvme_free_iod(nvmeq->dev, iod);
612 nomem:
613 return result;
614}
615
616static void nvme_make_request(struct request_queue *q, struct bio *bio)
617{
618 struct nvme_ns *ns = q->queuedata;
619 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
620 int result = -EBUSY;
621
622 spin_lock_irq(&nvmeq->q_lock);
623 if (bio_list_empty(&nvmeq->sq_cong))
624 result = nvme_submit_bio_queue(nvmeq, ns, bio);
625 if (unlikely(result)) {
626 if (bio_list_empty(&nvmeq->sq_cong))
627 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
628 bio_list_add(&nvmeq->sq_cong, bio);
629 }
630
631 spin_unlock_irq(&nvmeq->q_lock);
632 put_nvmeq(nvmeq);
633}
634
635static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
636{
637 u16 head, phase;
638
639 head = nvmeq->cq_head;
640 phase = nvmeq->cq_phase;
641
642 for (;;) {
643 void *ctx;
644 nvme_completion_fn fn;
645 struct nvme_completion cqe = nvmeq->cqes[head];
646 if ((le16_to_cpu(cqe.status) & 1) != phase)
647 break;
648 nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
649 if (++head == nvmeq->q_depth) {
650 head = 0;
651 phase = !phase;
652 }
653
654 ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
655 fn(nvmeq->dev, ctx, &cqe);
656 }
657
658 /* If the controller ignores the cq head doorbell and continuously
659 * writes to the queue, it is theoretically possible to wrap around
660 * the queue twice and mistakenly return IRQ_NONE. Linux only
661 * requires that 0.1% of your interrupts are handled, so this isn't
662 * a big problem.
663 */
664 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
665 return IRQ_NONE;
666
667 writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
668 nvmeq->cq_head = head;
669 nvmeq->cq_phase = phase;
670
671 return IRQ_HANDLED;
672}
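
The loop above is driven by the NVMe phase tag: the controller inverts the phase bit it writes each time it wraps the completion queue, so an entry is new exactly when its phase matches the consumer's expected phase. A small single-threaded model of that handshake (illustrative only):

#include <stdio.h>

#define QDEPTH 4

struct cqe { int data; int phase; };

static struct cqe cq[QDEPTH];           /* starts zeroed: phase 0 */

/* Consumer state, as in struct nvme_queue. */
static int cq_head, cq_phase = 1;

/* Controller side: post a completion, flipping phase on wrap. */
static void post(int data)
{
        static int tail, prod_phase = 1;

        cq[tail].data = data;
        cq[tail].phase = prod_phase;
        if (++tail == QDEPTH) {
                tail = 0;
                prod_phase = !prod_phase;
        }
}

/* Consumer side: same structure as nvme_process_cq(). */
static void process(void)
{
        while (cq[cq_head].phase == cq_phase) {
                printf("completed %d\n", cq[cq_head].data);
                if (++cq_head == QDEPTH) {
                        cq_head = 0;
                        cq_phase = !cq_phase;
                }
        }
}

int main(void)
{
        post(1); post(2); post(3);
        process();                      /* drains 1..3 */
        post(4); post(5); post(6);      /* 5 and 6 land after the wrap */
        process();                      /* phase flip lets us see them */
        return 0;
}
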
673
674static irqreturn_t nvme_irq(int irq, void *data)
675{
676 irqreturn_t result;
677 struct nvme_queue *nvmeq = data;
678 spin_lock(&nvmeq->q_lock);
679 result = nvme_process_cq(nvmeq);
680 spin_unlock(&nvmeq->q_lock);
681 return result;
682}
683
684static irqreturn_t nvme_irq_check(int irq, void *data)
685{
686 struct nvme_queue *nvmeq = data;
687 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
688 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
689 return IRQ_NONE;
690 return IRQ_WAKE_THREAD;
691}
692
693static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
694{
695 spin_lock_irq(&nvmeq->q_lock);
696 cancel_cmdid(nvmeq, cmdid, NULL);
697 spin_unlock_irq(&nvmeq->q_lock);
698}
699
700struct sync_cmd_info {
701 struct task_struct *task;
702 u32 result;
703 int status;
704};
705
706static void sync_completion(struct nvme_dev *dev, void *ctx,
707 struct nvme_completion *cqe)
708{
709 struct sync_cmd_info *cmdinfo = ctx;
710 cmdinfo->result = le32_to_cpup(&cqe->result);
711 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
712 wake_up_process(cmdinfo->task);
713}
714
715/*
716 * Returns 0 on success. If the result is negative, it's a Linux error code;
717 * if the result is positive, it's an NVM Express status code
718 */
719static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
720 struct nvme_command *cmd, u32 *result, unsigned timeout)
721{
722 int cmdid;
723 struct sync_cmd_info cmdinfo;
724
725 cmdinfo.task = current;
726 cmdinfo.status = -EINTR;
727
728 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
729 timeout);
730 if (cmdid < 0)
731 return cmdid;
732 cmd->common.command_id = cmdid;
733
734 set_current_state(TASK_KILLABLE);
735 nvme_submit_cmd(nvmeq, cmd);
736 schedule();
737
738 if (cmdinfo.status == -EINTR) {
739 nvme_abort_command(nvmeq, cmdid);
740 return -EINTR;
741 }
742
743 if (result)
744 *result = cmdinfo.result;
745
746 return cmdinfo.status;
747}
748
749static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
750 u32 *result)
751{
752 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
753}
754
755static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
756{
757 int status;
758 struct nvme_command c;
759
760 memset(&c, 0, sizeof(c));
761 c.delete_queue.opcode = opcode;
762 c.delete_queue.qid = cpu_to_le16(id);
763
764 status = nvme_submit_admin_cmd(dev, &c, NULL);
765 if (status)
766 return -EIO;
767 return 0;
768}
769
770static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
771 struct nvme_queue *nvmeq)
772{
773 int status;
774 struct nvme_command c;
775 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
776
777 memset(&c, 0, sizeof(c));
778 c.create_cq.opcode = nvme_admin_create_cq;
779 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
780 c.create_cq.cqid = cpu_to_le16(qid);
781 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
782 c.create_cq.cq_flags = cpu_to_le16(flags);
783 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
784
785 status = nvme_submit_admin_cmd(dev, &c, NULL);
786 if (status)
787 return -EIO;
788 return 0;
789}
790
791static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
792 struct nvme_queue *nvmeq)
793{
794 int status;
795 struct nvme_command c;
796 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
797
798 memset(&c, 0, sizeof(c));
799 c.create_sq.opcode = nvme_admin_create_sq;
800 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
801 c.create_sq.sqid = cpu_to_le16(qid);
802 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
803 c.create_sq.sq_flags = cpu_to_le16(flags);
804 c.create_sq.cqid = cpu_to_le16(qid);
805
806 status = nvme_submit_admin_cmd(dev, &c, NULL);
807 if (status)
808 return -EIO;
809 return 0;
810}
811
812static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
813{
814 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
815}
816
817static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
818{
819 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
820}
821
822static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
823 dma_addr_t dma_addr)
824{
825 struct nvme_command c;
826
827 memset(&c, 0, sizeof(c));
828 c.identify.opcode = nvme_admin_identify;
829 c.identify.nsid = cpu_to_le32(nsid);
830 c.identify.prp1 = cpu_to_le64(dma_addr);
831 c.identify.cns = cpu_to_le32(cns);
832
833 return nvme_submit_admin_cmd(dev, &c, NULL);
834}
835
836static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
837 unsigned dword11, dma_addr_t dma_addr)
838{
839 struct nvme_command c;
840
841 memset(&c, 0, sizeof(c));
842 c.features.opcode = nvme_admin_get_features;
843 c.features.prp1 = cpu_to_le64(dma_addr);
844 c.features.fid = cpu_to_le32(fid);
845 c.features.dword11 = cpu_to_le32(dword11);
846
847 return nvme_submit_admin_cmd(dev, &c, NULL);
848}
849
850static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
851 unsigned dword11, dma_addr_t dma_addr, u32 *result)
852{
853 struct nvme_command c;
854
855 memset(&c, 0, sizeof(c));
856 c.features.opcode = nvme_admin_set_features;
857 c.features.prp1 = cpu_to_le64(dma_addr);
858 c.features.fid = cpu_to_le32(fid);
859 c.features.dword11 = cpu_to_le32(dword11);
860
861 return nvme_submit_admin_cmd(dev, &c, result);
862}
863
864static void nvme_free_queue(struct nvme_dev *dev, int qid)
865{
866 struct nvme_queue *nvmeq = dev->queues[qid];
867 int vector = dev->entry[nvmeq->cq_vector].vector;
868
869 irq_set_affinity_hint(vector, NULL);
870 free_irq(vector, nvmeq);
871
872 /* Don't tell the adapter to delete the admin queue */
873 if (qid) {
874 adapter_delete_sq(dev, qid);
875 adapter_delete_cq(dev, qid);
876 }
877
878 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
879 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
880 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
881 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
882 kfree(nvmeq);
883}
884
885static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
886 int depth, int vector)
887{
888 struct device *dmadev = &dev->pci_dev->dev;
889 unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
890 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
891 if (!nvmeq)
892 return NULL;
893
894 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
895 &nvmeq->cq_dma_addr, GFP_KERNEL);
896 if (!nvmeq->cqes)
897 goto free_nvmeq;
898 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
899
900 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
901 &nvmeq->sq_dma_addr, GFP_KERNEL);
902 if (!nvmeq->sq_cmds)
903 goto free_cqdma;
904
905 nvmeq->q_dmadev = dmadev;
906 nvmeq->dev = dev;
907 spin_lock_init(&nvmeq->q_lock);
908 nvmeq->cq_head = 0;
909 nvmeq->cq_phase = 1;
910 init_waitqueue_head(&nvmeq->sq_full);
911 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
912 bio_list_init(&nvmeq->sq_cong);
913 nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
914 nvmeq->q_depth = depth;
915 nvmeq->cq_vector = vector;
916
917 return nvmeq;
918
919 free_cqdma:
920 dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
921 nvmeq->cq_dma_addr);
922 free_nvmeq:
923 kfree(nvmeq);
924 return NULL;
925}
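
The q_db computation above (and the '+ (1 << db_stride)' used when writing the CQ head) encodes the doorbell-stride rule: for queue qid the submission and completion doorbells sit at dword indexes (2*qid) << DSTRD and (2*qid + 1) << DSTRD past BAR offset 0x1000. A quick check of the byte offsets this produces (illustrative helper, not driver code):

#include <stdio.h>

/* Byte offset of a doorbell register from the start of the BAR.
 * 'is_cq' selects the completion-queue head doorbell; 'stride' is
 * CAP.DSTRD (dev->db_stride in the driver). */
static unsigned long doorbell_offset(int qid, int is_cq, int stride)
{
        unsigned long dword_index = ((unsigned long)qid << (stride + 1)) +
                                    ((unsigned long)is_cq << stride);

        return 4096 + dword_index * 4;
}

int main(void)
{
        int qid, stride;

        for (stride = 0; stride <= 1; stride++)
                for (qid = 0; qid <= 2; qid++)
                        printf("stride %d, queue %d: SQ tail @ 0x%lx, CQ head @ 0x%lx\n",
                               stride, qid,
                               doorbell_offset(qid, 0, stride),
                               doorbell_offset(qid, 1, stride));
        return 0;
}
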
926
927static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
928 const char *name)
929{
930 if (use_threaded_interrupts)
931 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
932 nvme_irq_check, nvme_irq,
933 IRQF_DISABLED | IRQF_SHARED,
934 name, nvmeq);
935 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
936 IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
937}
938
939static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
940 int qid, int cq_size, int vector)
941{
942 int result;
943 struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
944
945 if (!nvmeq)
946 return ERR_PTR(-ENOMEM);
947
948 result = adapter_alloc_cq(dev, qid, nvmeq);
949 if (result < 0)
950 goto free_nvmeq;
951
952 result = adapter_alloc_sq(dev, qid, nvmeq);
953 if (result < 0)
954 goto release_cq;
955
956 result = queue_request_irq(dev, nvmeq, "nvme");
957 if (result < 0)
958 goto release_sq;
959
960 return nvmeq;
961
962 release_sq:
963 adapter_delete_sq(dev, qid);
964 release_cq:
965 adapter_delete_cq(dev, qid);
966 free_nvmeq:
967 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
968 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
969 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
970 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
971 kfree(nvmeq);
972 return ERR_PTR(result);
973}
974
975static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
976{
977 int result;
978 u32 aqa;
979 u64 cap;
980 unsigned long timeout;
981 struct nvme_queue *nvmeq;
982
983 dev->dbs = ((void __iomem *)dev->bar) + 4096;
984
985 nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
986 if (!nvmeq)
987 return -ENOMEM;
988
989 aqa = nvmeq->q_depth - 1;
990 aqa |= aqa << 16;
991
992 dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
993 dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
994 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
995 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
996
997 writel(0, &dev->bar->cc);
998 writel(aqa, &dev->bar->aqa);
999 writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
1000 writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
1001 writel(dev->ctrl_config, &dev->bar->cc);
1002
1003 cap = readq(&dev->bar->cap);
1004 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1005 dev->db_stride = NVME_CAP_STRIDE(cap);
1006
1007 while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
1008 msleep(100);
1009 if (fatal_signal_pending(current))
1010 return -EINTR;
1011 if (time_after(jiffies, timeout)) {
1012 dev_err(&dev->pci_dev->dev,
1013 "Device not ready; aborting initialisation\n");
1014 return -ENODEV;
1015 }
1016 }
1017
1018 result = queue_request_irq(dev, nvmeq, "nvme admin");
1019 dev->queues[0] = nvmeq;
1020 return result;
1021}
1022
1023static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1024 unsigned long addr, unsigned length)
1025{
1026 int i, err, count, nents, offset;
1027 struct scatterlist *sg;
1028 struct page **pages;
1029 struct nvme_iod *iod;
1030
1031 if (addr & 3)
1032 return ERR_PTR(-EINVAL);
1033 if (!length)
1034 return ERR_PTR(-EINVAL);
1035
1036 offset = offset_in_page(addr);
1037 count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1038 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
1039
1040 err = get_user_pages_fast(addr, count, 1, pages);
1041 if (err < count) {
1042 count = err;
1043 err = -EFAULT;
1044 goto put_pages;
1045 }
1046
1047 iod = nvme_alloc_iod(count, length, GFP_KERNEL);
1048 sg = iod->sg;
1049 sg_init_table(sg, count);
1050 for (i = 0; i < count; i++) {
1051 sg_set_page(&sg[i], pages[i],
1052 min_t(int, length, PAGE_SIZE - offset), offset);
1053 length -= (PAGE_SIZE - offset);
1054 offset = 0;
1055 }
1056 sg_mark_end(&sg[i - 1]);
1057 iod->nents = count;
1058
1059 err = -ENOMEM;
1060 nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
1061 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1062 if (!nents)
1063 goto free_iod;
1064
1065 kfree(pages);
1066 return iod;
1067
1068 free_iod:
1069 kfree(iod);
1070 put_pages:
1071 for (i = 0; i < count; i++)
1072 put_page(pages[i]);
1073 kfree(pages);
1074 return ERR_PTR(err);
1075}
1076
1077static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1078 struct nvme_iod *iod)
1079{
1080 int i;
1081
1082 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
1083 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1084
1085 for (i = 0; i < iod->nents; i++)
1086 put_page(sg_page(&iod->sg[i]));
1087}
1088
1089static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1090{
1091 struct nvme_dev *dev = ns->dev;
1092 struct nvme_queue *nvmeq;
1093 struct nvme_user_io io;
1094 struct nvme_command c;
1095 unsigned length;
1096 int status;
1097 struct nvme_iod *iod;
1098
1099 if (copy_from_user(&io, uio, sizeof(io)))
1100 return -EFAULT;
1101 length = (io.nblocks + 1) << ns->lba_shift;
1102
1103 switch (io.opcode) {
1104 case nvme_cmd_write:
1105 case nvme_cmd_read:
1106 case nvme_cmd_compare:
1107 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
1108 break;
1109 default:
1110 return -EINVAL;
1111 }
1112
1113 if (IS_ERR(iod))
1114 return PTR_ERR(iod);
1115
1116 memset(&c, 0, sizeof(c));
1117 c.rw.opcode = io.opcode;
1118 c.rw.flags = io.flags;
1119 c.rw.nsid = cpu_to_le32(ns->ns_id);
1120 c.rw.slba = cpu_to_le64(io.slba);
1121 c.rw.length = cpu_to_le16(io.nblocks);
1122 c.rw.control = cpu_to_le16(io.control);
1123 c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
1124 c.rw.reftag = io.reftag;
1125 c.rw.apptag = io.apptag;
1126 c.rw.appmask = io.appmask;
1127 /* XXX: metadata */
1128 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
1129
1130 nvmeq = get_nvmeq(dev);
1131 /*
1132 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
1133 * disabled. We may be preempted at any point, and be rescheduled
1134 * to a different CPU. That will cause cacheline bouncing, but no
1135 * additional races since q_lock already protects against other CPUs.
1136 */
1137 put_nvmeq(nvmeq);
1138 if (length != (io.nblocks + 1) << ns->lba_shift)
1139 status = -ENOMEM;
1140 else
1141 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
1142
1143 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
1144 nvme_free_iod(dev, iod);
1145 return status;
1146}
1147
1148static int nvme_user_admin_cmd(struct nvme_ns *ns,
1149 struct nvme_admin_cmd __user *ucmd)
1150{
1151 struct nvme_dev *dev = ns->dev;
1152 struct nvme_admin_cmd cmd;
1153 struct nvme_command c;
1154 int status, length;
1155 struct nvme_iod *iod;
1156
1157 if (!capable(CAP_SYS_ADMIN))
1158 return -EACCES;
1159 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1160 return -EFAULT;
1161
1162 memset(&c, 0, sizeof(c));
1163 c.common.opcode = cmd.opcode;
1164 c.common.flags = cmd.flags;
1165 c.common.nsid = cpu_to_le32(cmd.nsid);
1166 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1167 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1168 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
1169 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
1170 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
1171 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
1172 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
1173 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
1174
1175 length = cmd.data_len;
1176 if (cmd.data_len) {
1177 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
1178 length);
1179 if (IS_ERR(iod))
1180 return PTR_ERR(iod);
1181 length = nvme_setup_prps(dev, &c.common, iod, length,
1182 GFP_KERNEL);
1183 }
1184
1185 if (length != cmd.data_len)
1186 status = -ENOMEM;
1187 else
1188 status = nvme_submit_admin_cmd(dev, &c, NULL);
1189
1190 if (cmd.data_len) {
1191 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
1192 nvme_free_iod(dev, iod);
1193 }
1194 return status;
1195}
1196
1197static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1198 unsigned long arg)
1199{
1200 struct nvme_ns *ns = bdev->bd_disk->private_data;
1201
1202 switch (cmd) {
1203 case NVME_IOCTL_ID:
1204 return ns->ns_id;
1205 case NVME_IOCTL_ADMIN_CMD:
1206 return nvme_user_admin_cmd(ns, (void __user *)arg);
1207 case NVME_IOCTL_SUBMIT_IO:
1208 return nvme_submit_io(ns, (void __user *)arg);
1209 default:
1210 return -ENOTTY;
1211 }
1212}
1213
1214static const struct block_device_operations nvme_fops = {
1215 .owner = THIS_MODULE,
1216 .ioctl = nvme_ioctl,
1217 .compat_ioctl = nvme_ioctl,
1218};
1219
1220static void nvme_timeout_ios(struct nvme_queue *nvmeq)
1221{
1222 int depth = nvmeq->q_depth - 1;
1223 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1224 unsigned long now = jiffies;
1225 int cmdid;
1226
1227 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
1228 void *ctx;
1229 nvme_completion_fn fn;
1230 static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
1231
1232 if (!time_after(now, info[cmdid].timeout))
1233 continue;
1234 dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
1235 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
1236 fn(nvmeq->dev, ctx, &cqe);
1237 }
1238}
1239
1240static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1241{
1242 while (bio_list_peek(&nvmeq->sq_cong)) {
1243 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1244 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
1245 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1246 bio_list_add_head(&nvmeq->sq_cong, bio);
1247 break;
1248 }
1249 if (bio_list_empty(&nvmeq->sq_cong))
1250 remove_wait_queue(&nvmeq->sq_full,
1251 &nvmeq->sq_cong_wait);
1252 }
1253}
1254
1255static int nvme_kthread(void *data)
1256{
1257 struct nvme_dev *dev;
1258
1259 while (!kthread_should_stop()) {
1260 __set_current_state(TASK_RUNNING);
1261 spin_lock(&dev_list_lock);
1262 list_for_each_entry(dev, &dev_list, node) {
1263 int i;
1264 for (i = 0; i < dev->queue_count; i++) {
1265 struct nvme_queue *nvmeq = dev->queues[i];
1266 if (!nvmeq)
1267 continue;
1268 spin_lock_irq(&nvmeq->q_lock);
1269 if (nvme_process_cq(nvmeq))
1270 printk("process_cq did something\n");
1271 nvme_timeout_ios(nvmeq);
1272 nvme_resubmit_bios(nvmeq);
1273 spin_unlock_irq(&nvmeq->q_lock);
1274 }
1275 }
1276 spin_unlock(&dev_list_lock);
1277 set_current_state(TASK_INTERRUPTIBLE);
1278 schedule_timeout(HZ);
1279 }
1280 return 0;
1281}
1282
1283static DEFINE_IDA(nvme_index_ida);
1284
1285static int nvme_get_ns_idx(void)
1286{
1287 int index, error;
1288
1289 do {
1290 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
1291 return -1;
1292
1293 spin_lock(&dev_list_lock);
1294 error = ida_get_new(&nvme_index_ida, &index);
1295 spin_unlock(&dev_list_lock);
1296 } while (error == -EAGAIN);
1297
1298 if (error)
1299 index = -1;
1300 return index;
1301}
1302
1303static void nvme_put_ns_idx(int index)
1304{
1305 spin_lock(&dev_list_lock);
1306 ida_remove(&nvme_index_ida, index);
1307 spin_unlock(&dev_list_lock);
1308}
1309
1310static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
1311 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1312{
1313 struct nvme_ns *ns;
1314 struct gendisk *disk;
1315 int lbaf;
1316
1317 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1318 return NULL;
1319
1320 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1321 if (!ns)
1322 return NULL;
1323 ns->queue = blk_alloc_queue(GFP_KERNEL);
1324 if (!ns->queue)
1325 goto out_free_ns;
1326 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
1327 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1328 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1329/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
1330 blk_queue_make_request(ns->queue, nvme_make_request);
1331 ns->dev = dev;
1332 ns->queue->queuedata = ns;
1333
1334 disk = alloc_disk(NVME_MINORS);
1335 if (!disk)
1336 goto out_free_queue;
1337 ns->ns_id = nsid;
1338 ns->disk = disk;
1339 lbaf = id->flbas & 0xf;
1340 ns->lba_shift = id->lbaf[lbaf].ds;
1341
1342 disk->major = nvme_major;
1343 disk->minors = NVME_MINORS;
1344 disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
1345 disk->fops = &nvme_fops;
1346 disk->private_data = ns;
1347 disk->queue = ns->queue;
1348 disk->driverfs_dev = &dev->pci_dev->dev;
1349 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
1350 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1351
1352 return ns;
1353
1354 out_free_queue:
1355 blk_cleanup_queue(ns->queue);
1356 out_free_ns:
1357 kfree(ns);
1358 return NULL;
1359}
1360
1361static void nvme_ns_free(struct nvme_ns *ns)
1362{
1363 int index = ns->disk->first_minor / NVME_MINORS;
1364 put_disk(ns->disk);
1365 nvme_put_ns_idx(index);
1366 blk_cleanup_queue(ns->queue);
1367 kfree(ns);
1368}
1369
1370static int set_queue_count(struct nvme_dev *dev, int count)
1371{
1372 int status;
1373 u32 result;
1374 u32 q_count = (count - 1) | ((count - 1) << 16);
1375
1376 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
1377 &result);
1378 if (status)
1379 return -EIO;
1380 return min(result & 0xffff, result >> 16) + 1;
1381}
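
Number of Queues is a 0's based value in both halves of the dword: the request packs (count - 1) into the low and high 16 bits, and the completion result comes back the same way, so the driver takes the smaller half and adds one. A tiny round trip (illustrative only; the granted value is a made-up controller reply):

#include <stdio.h>

static unsigned int encode_queue_count(int count)
{
        return (count - 1) | ((count - 1) << 16);
}

/* Same as the tail of set_queue_count(): the controller may grant fewer
 * SQs than CQs (or vice versa), so take the minimum of the two halves. */
static int decode_queue_count(unsigned int result)
{
        unsigned int nsq = result & 0xffff;
        unsigned int ncq = result >> 16;

        return (nsq < ncq ? nsq : ncq) + 1;
}

int main(void)
{
        unsigned int asked = encode_queue_count(8);
        unsigned int granted = 3 | (7 << 16);   /* hypothetical reply: 4 SQs, 8 CQs */

        printf("requested dword11 0x%08x\n", asked);
        printf("usable I/O queues: %d\n", decode_queue_count(granted));
        return 0;
}
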
1382
1383static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
1384{
1385 int result, cpu, i, nr_io_queues, db_bar_size;
1386
1387 nr_io_queues = num_online_cpus();
1388 result = set_queue_count(dev, nr_io_queues);
1389 if (result < 0)
1390 return result;
1391 if (result < nr_io_queues)
1392 nr_io_queues = result;
1393
1394 /* Deregister the admin queue's interrupt */
1395 free_irq(dev->entry[0].vector, dev->queues[0]);
1396
1397 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1398 if (db_bar_size > 8192) {
1399 iounmap(dev->bar);
1400 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
1401 db_bar_size);
1402 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1403 dev->queues[0]->q_db = dev->dbs;
1404 }
1405
1406 for (i = 0; i < nr_io_queues; i++)
1407 dev->entry[i].entry = i;
1408 for (;;) {
1409 result = pci_enable_msix(dev->pci_dev, dev->entry,
1410 nr_io_queues);
1411 if (result == 0) {
1412 break;
1413 } else if (result > 0) {
1414 nr_io_queues = result;
1415 continue;
1416 } else {
1417 nr_io_queues = 1;
1418 break;
1419 }
1420 }
1421
1422 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1423 /* XXX: handle failure here */
1424
1425 cpu = cpumask_first(cpu_online_mask);
1426 for (i = 0; i < nr_io_queues; i++) {
1427 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1428 cpu = cpumask_next(cpu, cpu_online_mask);
1429 }
1430
1431 for (i = 0; i < nr_io_queues; i++) {
1432 dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
1433 NVME_Q_DEPTH, i);
1434 if (IS_ERR(dev->queues[i + 1]))
1435 return PTR_ERR(dev->queues[i + 1]);
1436 dev->queue_count++;
1437 }
1438
1439 for (; i < num_possible_cpus(); i++) {
1440 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1441 dev->queues[i + 1] = dev->queues[target + 1];
1442 }
1443
1444 return 0;
1445}
1446
1447static void nvme_free_queues(struct nvme_dev *dev)
1448{
1449 int i;
1450
1451 for (i = dev->queue_count - 1; i >= 0; i--)
1452 nvme_free_queue(dev, i);
1453}
1454
1455static int __devinit nvme_dev_add(struct nvme_dev *dev)
1456{
1457 int res, nn, i;
1458 struct nvme_ns *ns, *next;
1459 struct nvme_id_ctrl *ctrl;
1460 struct nvme_id_ns *id_ns;
1461 void *mem;
1462 dma_addr_t dma_addr;
1463
1464 res = nvme_setup_io_queues(dev);
1465 if (res)
1466 return res;
1467
1468 mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
1469 GFP_KERNEL);
1470
1471 res = nvme_identify(dev, 0, 1, dma_addr);
1472 if (res) {
1473 res = -EIO;
1474 goto out_free;
1475 }
1476
1477 ctrl = mem;
1478 nn = le32_to_cpup(&ctrl->nn);
1479 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1480 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1481 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
1482
1483 id_ns = mem;
1484 for (i = 1; i <= nn; i++) {
1485 res = nvme_identify(dev, i, 0, dma_addr);
1486 if (res)
1487 continue;
1488
1489 if (id_ns->ncap == 0)
1490 continue;
1491
1492 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
1493 dma_addr + 4096);
1494 if (res)
1495 continue;
1496
1497 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
1498 if (ns)
1499 list_add_tail(&ns->list, &dev->namespaces);
1500 }
1501 list_for_each_entry(ns, &dev->namespaces, list)
1502 add_disk(ns->disk);
1503
1504 goto out;
1505
1506 out_free:
1507 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1508 list_del(&ns->list);
1509 nvme_ns_free(ns);
1510 }
1511
1512 out:
1513 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
1514 return res;
1515}
1516
1517static int nvme_dev_remove(struct nvme_dev *dev)
1518{
1519 struct nvme_ns *ns, *next;
1520
1521 spin_lock(&dev_list_lock);
1522 list_del(&dev->node);
1523 spin_unlock(&dev_list_lock);
1524
1525 /* TODO: wait all I/O finished or cancel them */
1526
1527 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1528 list_del(&ns->list);
1529 del_gendisk(ns->disk);
1530 nvme_ns_free(ns);
1531 }
1532
1533 nvme_free_queues(dev);
1534
1535 return 0;
1536}
1537
1538static int nvme_setup_prp_pools(struct nvme_dev *dev)
1539{
1540 struct device *dmadev = &dev->pci_dev->dev;
1541 dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
1542 PAGE_SIZE, PAGE_SIZE, 0);
1543 if (!dev->prp_page_pool)
1544 return -ENOMEM;
1545
1546 /* Optimisation for I/Os between 4k and 128k */
1547 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
1548 256, 256, 0);
1549 if (!dev->prp_small_pool) {
1550 dma_pool_destroy(dev->prp_page_pool);
1551 return -ENOMEM;
1552 }
1553 return 0;
1554}
1555
1556static void nvme_release_prp_pools(struct nvme_dev *dev)
1557{
1558 dma_pool_destroy(dev->prp_page_pool);
1559 dma_pool_destroy(dev->prp_small_pool);
1560}
1561
1562/* XXX: Use an ida or something to let remove / add work correctly */
1563static void nvme_set_instance(struct nvme_dev *dev)
1564{
1565 static int instance;
1566 dev->instance = instance++;
1567}
1568
1569static void nvme_release_instance(struct nvme_dev *dev)
1570{
1571}
1572
1573static int __devinit nvme_probe(struct pci_dev *pdev,
1574 const struct pci_device_id *id)
1575{
1576 int bars, result = -ENOMEM;
1577 struct nvme_dev *dev;
1578
1579 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1580 if (!dev)
1581 return -ENOMEM;
1582 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
1583 GFP_KERNEL);
1584 if (!dev->entry)
1585 goto free;
1586 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
1587 GFP_KERNEL);
1588 if (!dev->queues)
1589 goto free;
1590
1591 if (pci_enable_device_mem(pdev))
1592 goto free;
1593 pci_set_master(pdev);
1594 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1595 if (pci_request_selected_regions(pdev, bars, "nvme"))
1596 goto disable;
1597
1598 INIT_LIST_HEAD(&dev->namespaces);
1599 dev->pci_dev = pdev;
1600 pci_set_drvdata(pdev, dev);
1601 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1602 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1603 nvme_set_instance(dev);
1604 dev->entry[0].vector = pdev->irq;
1605
1606 result = nvme_setup_prp_pools(dev);
1607 if (result)
1608 goto disable_msix;
1609
1610 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1611 if (!dev->bar) {
1612 result = -ENOMEM;
1613 goto disable_msix;
1614 }
1615
1616 result = nvme_configure_admin_queue(dev);
1617 if (result)
1618 goto unmap;
1619 dev->queue_count++;
1620
1621 spin_lock(&dev_list_lock);
1622 list_add(&dev->node, &dev_list);
1623 spin_unlock(&dev_list_lock);
1624
1625 result = nvme_dev_add(dev);
1626 if (result)
1627 goto delete;
1628
1629 return 0;
1630
1631 delete:
1632 spin_lock(&dev_list_lock);
1633 list_del(&dev->node);
1634 spin_unlock(&dev_list_lock);
1635
1636 nvme_free_queues(dev);
1637 unmap:
1638 iounmap(dev->bar);
1639 disable_msix:
1640 pci_disable_msix(pdev);
1641 nvme_release_instance(dev);
1642 nvme_release_prp_pools(dev);
1643 disable:
1644 pci_disable_device(pdev);
1645 pci_release_regions(pdev);
1646 free:
1647 kfree(dev->queues);
1648 kfree(dev->entry);
1649 kfree(dev);
1650 return result;
1651}
1652
1653static void __devexit nvme_remove(struct pci_dev *pdev)
1654{
1655 struct nvme_dev *dev = pci_get_drvdata(pdev);
1656 nvme_dev_remove(dev);
1657 pci_disable_msix(pdev);
1658 iounmap(dev->bar);
1659 nvme_release_instance(dev);
1660 nvme_release_prp_pools(dev);
1661 pci_disable_device(pdev);
1662 pci_release_regions(pdev);
1663 kfree(dev->queues);
1664 kfree(dev->entry);
1665 kfree(dev);
1666}
1667
1668/* These functions are yet to be implemented */
1669#define nvme_error_detected NULL
1670#define nvme_dump_registers NULL
1671#define nvme_link_reset NULL
1672#define nvme_slot_reset NULL
1673#define nvme_error_resume NULL
1674#define nvme_suspend NULL
1675#define nvme_resume NULL
1676
1677static struct pci_error_handlers nvme_err_handler = {
1678 .error_detected = nvme_error_detected,
1679 .mmio_enabled = nvme_dump_registers,
1680 .link_reset = nvme_link_reset,
1681 .slot_reset = nvme_slot_reset,
1682 .resume = nvme_error_resume,
1683};
1684
1685/* Move to pci_ids.h later */
1686#define PCI_CLASS_STORAGE_EXPRESS 0x010802
1687
1688static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
1689 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
1690 { 0, }
1691};
1692MODULE_DEVICE_TABLE(pci, nvme_id_table);
1693
1694static struct pci_driver nvme_driver = {
1695 .name = "nvme",
1696 .id_table = nvme_id_table,
1697 .probe = nvme_probe,
1698 .remove = __devexit_p(nvme_remove),
1699 .suspend = nvme_suspend,
1700 .resume = nvme_resume,
1701 .err_handler = &nvme_err_handler,
1702};
1703
1704static int __init nvme_init(void)
1705{
1706 int result = -EBUSY;
1707
1708 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
1709 if (IS_ERR(nvme_thread))
1710 return PTR_ERR(nvme_thread);
1711
1712 nvme_major = register_blkdev(nvme_major, "nvme");
1713 if (nvme_major <= 0)
1714 goto kill_kthread;
1715
1716 result = pci_register_driver(&nvme_driver);
1717 if (result)
1718 goto unregister_blkdev;
1719 return 0;
1720
1721 unregister_blkdev:
1722 unregister_blkdev(nvme_major, "nvme");
1723 kill_kthread:
1724 kthread_stop(nvme_thread);
1725 return result;
1726}
1727
1728static void __exit nvme_exit(void)
1729{
1730 pci_unregister_driver(&nvme_driver);
1731 unregister_blkdev(nvme_major, "nvme");
1732 kthread_stop(nvme_thread);
1733}
1734
1735MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
1736MODULE_LICENSE("GPL");
1737MODULE_VERSION("0.8");
1738module_init(nvme_init);
1739module_exit(nvme_exit);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 6a8771f47a55..32362cf35b8d 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -846,6 +846,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
846 846
847 do { 847 do {
848 rc = __tpm_pcr_read(chip, 0, digest); 848 rc = __tpm_pcr_read(chip, 0, digest);
849 if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
850 dev_info(chip->dev,
851 "TPM is disabled/deactivated (0x%X)\n", rc);
852 /* TPM is disabled and/or deactivated; driver can
853 * proceed and TPM does handle commands for
854 * suspend/resume correctly
855 */
856 return 0;
857 }
849 if (rc != TPM_WARN_DOING_SELFTEST) 858 if (rc != TPM_WARN_DOING_SELFTEST)
850 return rc; 859 return rc;
851 msleep(delay_msec); 860 msleep(delay_msec);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 8c1df302fbb6..010547138281 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -39,6 +39,9 @@ enum tpm_addr {
39}; 39};
40 40
41#define TPM_WARN_DOING_SELFTEST 0x802 41#define TPM_WARN_DOING_SELFTEST 0x802
42#define TPM_ERR_DEACTIVATED 0x6
43#define TPM_ERR_DISABLED 0x7
44
42#define TPM_HEADER_SIZE 10 45#define TPM_HEADER_SIZE 10
43extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr, 46extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
44 char *); 47 char *);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 37c4bd1cacd5..d0c41188d4e5 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -87,6 +87,7 @@ config GPIO_GENERIC_PLATFORM
87 87
88config GPIO_IT8761E 88config GPIO_IT8761E
89 tristate "IT8761E GPIO support" 89 tristate "IT8761E GPIO support"
90 depends on X86 # unconditional access to IO space.
90 help 91 help
91 Say yes here to support GPIO functionality of IT8761E super I/O chip. 92 Say yes here to support GPIO functionality of IT8761E super I/O chip.
92 93
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 461958fc2264..03d6dd5dcb77 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -248,7 +248,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
248static int ioh_irq_type(struct irq_data *d, unsigned int type) 248static int ioh_irq_type(struct irq_data *d, unsigned int type)
249{ 249{
250 u32 im; 250 u32 im;
251 u32 *im_reg; 251 void __iomem *im_reg;
252 u32 ien; 252 u32 ien;
253 u32 im_pos; 253 u32 im_pos;
254 int ch; 254 int ch;
@@ -412,7 +412,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
412 int i, j; 412 int i, j;
413 struct ioh_gpio *chip; 413 struct ioh_gpio *chip;
414 void __iomem *base; 414 void __iomem *base;
415 void __iomem *chip_save; 415 void *chip_save;
416 int irq_base; 416 int irq_base;
417 417
418 ret = pci_enable_device(pdev); 418 ret = pci_enable_device(pdev);
@@ -428,7 +428,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
428 } 428 }
429 429
430 base = pci_iomap(pdev, 1, 0); 430 base = pci_iomap(pdev, 1, 0);
431 if (base == 0) { 431 if (!base) {
432 dev_err(&pdev->dev, "%s : pci_iomap failed", __func__); 432 dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
433 ret = -ENOMEM; 433 ret = -ENOMEM;
434 goto err_iomap; 434 goto err_iomap;
@@ -521,7 +521,7 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
521 int err; 521 int err;
522 int i; 522 int i;
523 struct ioh_gpio *chip = pci_get_drvdata(pdev); 523 struct ioh_gpio *chip = pci_get_drvdata(pdev);
524 void __iomem *chip_save; 524 void *chip_save;
525 525
526 chip_save = chip; 526 chip_save = chip;
527 527
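
The gpio-ml-ioh changes above are sparse annotation fixes: pointers that refer to PCI BAR space keep the __iomem marker end to end and are only touched through the MMIO accessors. A kernel-style sketch (not a standalone program) of the intended usage; set_mask_bit() is a hypothetical helper and base would come from pci_iomap() as in ioh_gpio_probe():

#include <linux/io.h>
#include <linux/types.h>

/* Read-modify-write one bit of an interrupt-mask register.  The __iomem
 * cookie is never dereferenced directly; ioread32() and iowrite32() do
 * the actual access.
 */
static void set_mask_bit(void __iomem *base, unsigned int reg_off,
			 unsigned int bit)
{
	u32 __iomem *reg = base + reg_off;
	u32 val;

	val = ioread32(reg);
	val |= 1U << bit;
	iowrite32(val, reg);
}
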
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index f0603297f829..68fa55e86eb1 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -231,7 +231,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
231static int pch_irq_type(struct irq_data *d, unsigned int type) 231static int pch_irq_type(struct irq_data *d, unsigned int type)
232{ 232{
233 u32 im; 233 u32 im;
234 u32 *im_reg; 234 u32 __iomem *im_reg;
235 u32 ien; 235 u32 ien;
236 u32 im_pos; 236 u32 im_pos;
237 int ch; 237 int ch;
@@ -376,7 +376,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
376 } 376 }
377 377
378 chip->base = pci_iomap(pdev, 1, 0); 378 chip->base = pci_iomap(pdev, 1, 0);
379 if (chip->base == 0) { 379 if (!chip->base) {
380 dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__); 380 dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
381 ret = -ENOMEM; 381 ret = -ENOMEM;
382 goto err_iomap; 382 goto err_iomap;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index b9c1c297669e..91f45b965d1e 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -52,7 +52,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
52 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); 52 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
53 53
54 /* Set the initial value */ 54 /* Set the initial value */
55 tps65910_gpio_set(gc, 0, value); 55 tps65910_gpio_set(gc, offset, value);
56 56
57 return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, 57 return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
58 GPIO_CFG_MASK); 58 GPIO_CFG_MASK);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index e770bd190a5c..5d5330f667f1 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <drm/drmP.h> 22#include <drm/drmP.h>
23#include <linux/shmem_fs.h>
23#include "psb_drv.h" 24#include "psb_drv.h"
24 25
25 26
@@ -203,9 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
203 gt->npage = pages; 204 gt->npage = pages;
204 205
205 for (i = 0; i < pages; i++) { 206 for (i = 0; i < pages; i++) {
206 /* FIXME: needs updating as per mail from Hugh Dickins */ 207 p = shmem_read_mapping_page(mapping, i);
207 p = read_cache_page_gfp(mapping, i,
208 __GFP_COLD | GFP_KERNEL);
209 if (IS_ERR(p)) 208 if (IS_ERR(p))
210 goto err; 209 goto err;
211 gt->pages[i] = p; 210 gt->pages[i] = p;
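
The gma500 change above switches GEM backing-store population from read_cache_page_gfp() to shmem_read_mapping_page(), which reads each page with the shmem mapping's own gfp mask. A kernel-style sketch of that attach loop with its partial-failure unwind; attach_shmem_pages() is a hypothetical helper, not the driver's function:

#include <linux/shmem_fs.h>
#include <linux/mm.h>
#include <linux/err.h>

/* Pull npage pages of a shmem object into an array, dropping the page
 * references again if any lookup fails part-way through.
 */
static int attach_shmem_pages(struct address_space *mapping,
			      struct page **pages, int npage)
{
	struct page *p;
	int i;

	for (i = 0; i < npage; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto err;
		pages[i] = p;
	}
	return 0;

err:
	while (i--)
		put_page(pages[i]);
	return PTR_ERR(p);
}
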
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index cbe7a2fb779f..3101dd59e379 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -682,19 +682,19 @@ config I2C_XILINX
682 will be called xilinx_i2c. 682 will be called xilinx_i2c.
683 683
684config I2C_EG20T 684config I2C_EG20T
685 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)" 685 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
686 depends on PCI 686 depends on PCI
687 help 687 help
688 This driver is for PCH(Platform controller Hub) I2C of EG20T which 688 This driver is for PCH(Platform controller Hub) I2C of EG20T which
689 is an IOH(Input/Output Hub) for x86 embedded processor. 689 is an IOH(Input/Output Hub) for x86 embedded processor.
690 This driver can access PCH I2C bus device. 690 This driver can access PCH I2C bus device.
691 691
692 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 692 This driver also can be used for LAPIS Semiconductor IOH(Input/
693 Output Hub), ML7213 and ML7223. 693 Output Hub), ML7213, ML7223 and ML7831.
694 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 694 ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
695 for MP(Media Phone) use. 695 for MP(Media Phone) use and ML7831 IOH is for general purpose use.
696 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 696 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
697 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 697 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
698 698
699comment "External I2C/SMBus adapter drivers" 699comment "External I2C/SMBus adapter drivers"
700 700
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 3ef3557b6e32..ca8877641040 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -136,7 +136,8 @@
136/* 136/*
137Set the number of I2C instance max 137Set the number of I2C instance max
138Intel EG20T PCH : 1ch 138Intel EG20T PCH : 1ch
139OKI SEMICONDUCTOR ML7213 IOH : 2ch 139LAPIS Semiconductor ML7213 IOH : 2ch
140LAPIS Semiconductor ML7831 IOH : 1ch
140*/ 141*/
141#define PCH_I2C_MAX_DEV 2 142#define PCH_I2C_MAX_DEV 2
142 143
@@ -180,15 +181,17 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
180static wait_queue_head_t pch_event; 181static wait_queue_head_t pch_event;
181static DEFINE_MUTEX(pch_mutex); 182static DEFINE_MUTEX(pch_mutex);
182 183
183/* Definition for ML7213 by OKI SEMICONDUCTOR */ 184/* Definition for ML7213 by LAPIS Semiconductor */
184#define PCI_VENDOR_ID_ROHM 0x10DB 185#define PCI_VENDOR_ID_ROHM 0x10DB
185#define PCI_DEVICE_ID_ML7213_I2C 0x802D 186#define PCI_DEVICE_ID_ML7213_I2C 0x802D
186#define PCI_DEVICE_ID_ML7223_I2C 0x8010 187#define PCI_DEVICE_ID_ML7223_I2C 0x8010
188#define PCI_DEVICE_ID_ML7831_I2C 0x8817
187 189
188static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = { 190static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
189 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, }, 191 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
190 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, }, 192 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
191 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, }, 193 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
194 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
192 {0,} 195 {0,}
193}; 196};
194 197
@@ -243,7 +246,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
243 if (pch_clk > PCH_MAX_CLK) 246 if (pch_clk > PCH_MAX_CLK)
244 pch_clk = 62500; 247 pch_clk = 62500;
245 248
246 pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8; 249 pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
247 /* Set transfer speed in I2CBC */ 250 /* Set transfer speed in I2CBC */
248 iowrite32(pch_i2cbc, p + PCH_I2CBC); 251 iowrite32(pch_i2cbc, p + PCH_I2CBC);
249 252
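
The one-line change above in pch_i2c_init() is an operator-precedence fix: '/' and '*' have equal precedence and associate left to right, so the old expression divided by pch_i2c_speed and then multiplied by 8 instead of dividing by (pch_i2c_speed * 8). With the driver's pch_clk default of 50000 kHz and an assumed 100 kHz bus speed, the difference is large, as this stand-alone check shows:

#include <stdio.h>

int main(void)
{
	unsigned int pch_clk = 50000;		/* kHz, driver default */
	unsigned int pch_i2c_speed = 100;	/* kHz, assumed 100 kHz bus */

	/* old: parses as ((clk + speed*4) / speed) * 8 */
	unsigned int old = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
	/* fixed: divides by the full (speed * 8) term */
	unsigned int fixed = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);

	printf("old=%u fixed=%u\n", old, fixed);	/* prints old=4032 fixed=63 */
	return 0;
}
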
@@ -918,7 +921,9 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
918 pch_adap->dev.parent = &pdev->dev; 921 pch_adap->dev.parent = &pdev->dev;
919 922
920 pch_i2c_init(&adap_info->pch_data[i]); 923 pch_i2c_init(&adap_info->pch_data[i]);
921 ret = i2c_add_adapter(pch_adap); 924
925 pch_adap->nr = i;
926 ret = i2c_add_numbered_adapter(pch_adap);
922 if (ret) { 927 if (ret) {
923 pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i); 928 pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
924 goto err_add_adapter; 929 goto err_add_adapter;
@@ -1058,8 +1063,8 @@ static void __exit pch_pci_exit(void)
1058} 1063}
1059module_exit(pch_pci_exit); 1064module_exit(pch_pci_exit);
1060 1065
1061MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver"); 1066MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
1062MODULE_LICENSE("GPL"); 1067MODULE_LICENSE("GPL");
1063MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>"); 1068MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
1064module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); 1069module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
1065module_param(pch_clk, int, (S_IRUSR | S_IWUSR)); 1070module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index fa23faa20f0e..f713eac55047 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -37,6 +37,9 @@
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/clk.h> 38#include <linux/clk.h>
39#include <linux/io.h> 39#include <linux/io.h>
40#include <linux/of.h>
41#include <linux/of_i2c.h>
42#include <linux/of_device.h>
40#include <linux/slab.h> 43#include <linux/slab.h>
41#include <linux/i2c-omap.h> 44#include <linux/i2c-omap.h>
42#include <linux/pm_runtime.h> 45#include <linux/pm_runtime.h>
@@ -182,7 +185,9 @@ struct omap_i2c_dev {
182 u32 latency; /* maximum mpu wkup latency */ 185 u32 latency; /* maximum mpu wkup latency */
183 void (*set_mpu_wkup_lat)(struct device *dev, 186 void (*set_mpu_wkup_lat)(struct device *dev,
184 long latency); 187 long latency);
185 u32 speed; /* Speed of bus in Khz */ 188 u32 speed; /* Speed of bus in kHz */
189 u32 dtrev; /* extra revision from DT */
190 u32 flags;
186 u16 cmd_err; 191 u16 cmd_err;
187 u8 *buf; 192 u8 *buf;
188 u8 *regs; 193 u8 *regs;
@@ -235,7 +240,7 @@ static const u8 reg_map_ip_v2[] = {
235 [OMAP_I2C_BUF_REG] = 0x94, 240 [OMAP_I2C_BUF_REG] = 0x94,
236 [OMAP_I2C_CNT_REG] = 0x98, 241 [OMAP_I2C_CNT_REG] = 0x98,
237 [OMAP_I2C_DATA_REG] = 0x9c, 242 [OMAP_I2C_DATA_REG] = 0x9c,
238 [OMAP_I2C_SYSC_REG] = 0x20, 243 [OMAP_I2C_SYSC_REG] = 0x10,
239 [OMAP_I2C_CON_REG] = 0xa4, 244 [OMAP_I2C_CON_REG] = 0xa4,
240 [OMAP_I2C_OA_REG] = 0xa8, 245 [OMAP_I2C_OA_REG] = 0xa8,
241 [OMAP_I2C_SA_REG] = 0xac, 246 [OMAP_I2C_SA_REG] = 0xac,
@@ -266,11 +271,7 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
266 271
267static void omap_i2c_unidle(struct omap_i2c_dev *dev) 272static void omap_i2c_unidle(struct omap_i2c_dev *dev)
268{ 273{
269 struct omap_i2c_bus_platform_data *pdata; 274 if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
270
271 pdata = dev->dev->platform_data;
272
273 if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
274 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); 275 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
275 omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate); 276 omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
276 omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate); 277 omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -291,13 +292,10 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
291 292
292static void omap_i2c_idle(struct omap_i2c_dev *dev) 293static void omap_i2c_idle(struct omap_i2c_dev *dev)
293{ 294{
294 struct omap_i2c_bus_platform_data *pdata;
295 u16 iv; 295 u16 iv;
296 296
297 pdata = dev->dev->platform_data;
298
299 dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); 297 dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
300 if (pdata->rev == OMAP_I2C_IP_VERSION_2) 298 if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
301 omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1); 299 omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
302 else 300 else
303 omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0); 301 omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -320,9 +318,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
320 unsigned long timeout; 318 unsigned long timeout;
321 unsigned long internal_clk = 0; 319 unsigned long internal_clk = 0;
322 struct clk *fclk; 320 struct clk *fclk;
323 struct omap_i2c_bus_platform_data *pdata;
324
325 pdata = dev->dev->platform_data;
326 321
327 if (dev->rev >= OMAP_I2C_OMAP1_REV_2) { 322 if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
328 /* Disable I2C controller before soft reset */ 323 /* Disable I2C controller before soft reset */
@@ -373,7 +368,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
373 } 368 }
374 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); 369 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
375 370
376 if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) { 371 if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
377 /* 372 /*
378 * The I2C functional clock is the armxor_ck, so there's 373 * The I2C functional clock is the armxor_ck, so there's
379 * no need to get "armxor_ck" separately. Now, if OMAP2420 374 * no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -397,7 +392,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
397 psc = fclk_rate / 12000000; 392 psc = fclk_rate / 12000000;
398 } 393 }
399 394
400 if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) { 395 if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
401 396
402 /* 397 /*
403 * HSI2C controller internal clk rate should be 19.2 Mhz for 398 * HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -406,7 +401,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
406 * The filter is iclk (fclk for HS) period. 401 * The filter is iclk (fclk for HS) period.
407 */ 402 */
408 if (dev->speed > 400 || 403 if (dev->speed > 400 ||
409 pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK) 404 dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
410 internal_clk = 19200; 405 internal_clk = 19200;
411 else if (dev->speed > 100) 406 else if (dev->speed > 100)
412 internal_clk = 9600; 407 internal_clk = 9600;
@@ -475,7 +470,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
475 470
476 dev->errata = 0; 471 dev->errata = 0;
477 472
478 if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207) 473 if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
479 dev->errata |= I2C_OMAP_ERRATA_I207; 474 dev->errata |= I2C_OMAP_ERRATA_I207;
480 475
481 /* Enable interrupts */ 476 /* Enable interrupts */
@@ -484,7 +479,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
484 OMAP_I2C_IE_AL) | ((dev->fifo_size) ? 479 OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
485 (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0); 480 (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
486 omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); 481 omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
487 if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { 482 if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
488 dev->pscstate = psc; 483 dev->pscstate = psc;
489 dev->scllstate = scll; 484 dev->scllstate = scll;
490 dev->sclhstate = sclh; 485 dev->sclhstate = sclh;
@@ -804,9 +799,6 @@ omap_i2c_isr(int this_irq, void *dev_id)
804 u16 bits; 799 u16 bits;
805 u16 stat, w; 800 u16 stat, w;
806 int err, count = 0; 801 int err, count = 0;
807 struct omap_i2c_bus_platform_data *pdata;
808
809 pdata = dev->dev->platform_data;
810 802
811 if (pm_runtime_suspended(dev->dev)) 803 if (pm_runtime_suspended(dev->dev))
812 return IRQ_NONE; 804 return IRQ_NONE;
@@ -830,11 +822,9 @@ complete:
830 ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | 822 ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
831 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); 823 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
832 824
833 if (stat & OMAP_I2C_STAT_NACK) { 825 if (stat & OMAP_I2C_STAT_NACK)
834 err |= OMAP_I2C_STAT_NACK; 826 err |= OMAP_I2C_STAT_NACK;
835 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 827
836 OMAP_I2C_CON_STP);
837 }
838 if (stat & OMAP_I2C_STAT_AL) { 828 if (stat & OMAP_I2C_STAT_AL) {
839 dev_err(dev->dev, "Arbitration lost\n"); 829 dev_err(dev->dev, "Arbitration lost\n");
840 err |= OMAP_I2C_STAT_AL; 830 err |= OMAP_I2C_STAT_AL;
@@ -875,7 +865,7 @@ complete:
875 * Data reg in 2430, omap3 and 865 * Data reg in 2430, omap3 and
876 * omap4 is 8 bit wide 866 * omap4 is 8 bit wide
877 */ 867 */
878 if (pdata->flags & 868 if (dev->flags &
879 OMAP_I2C_FLAG_16BIT_DATA_REG) { 869 OMAP_I2C_FLAG_16BIT_DATA_REG) {
880 if (dev->buf_len) { 870 if (dev->buf_len) {
881 *dev->buf++ = w >> 8; 871 *dev->buf++ = w >> 8;
@@ -918,7 +908,7 @@ complete:
918 * Data reg in 2430, omap3 and 908 * Data reg in 2430, omap3 and
919 * omap4 is 8 bit wide 909 * omap4 is 8 bit wide
920 */ 910 */
921 if (pdata->flags & 911 if (dev->flags &
922 OMAP_I2C_FLAG_16BIT_DATA_REG) { 912 OMAP_I2C_FLAG_16BIT_DATA_REG) {
923 if (dev->buf_len) { 913 if (dev->buf_len) {
924 w |= *dev->buf++ << 8; 914 w |= *dev->buf++ << 8;
@@ -965,6 +955,32 @@ static const struct i2c_algorithm omap_i2c_algo = {
965 .functionality = omap_i2c_func, 955 .functionality = omap_i2c_func,
966}; 956};
967 957
958#ifdef CONFIG_OF
959static struct omap_i2c_bus_platform_data omap3_pdata = {
960 .rev = OMAP_I2C_IP_VERSION_1,
961 .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
962 OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
963 OMAP_I2C_FLAG_BUS_SHIFT_2,
964};
965
966static struct omap_i2c_bus_platform_data omap4_pdata = {
967 .rev = OMAP_I2C_IP_VERSION_2,
968};
969
970static const struct of_device_id omap_i2c_of_match[] = {
971 {
972 .compatible = "ti,omap4-i2c",
973 .data = &omap4_pdata,
974 },
975 {
976 .compatible = "ti,omap3-i2c",
977 .data = &omap3_pdata,
978 },
979 { },
980};
981MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
982#endif
983
968static int __devinit 984static int __devinit
969omap_i2c_probe(struct platform_device *pdev) 985omap_i2c_probe(struct platform_device *pdev)
970{ 986{
@@ -972,9 +988,10 @@ omap_i2c_probe(struct platform_device *pdev)
972 struct i2c_adapter *adap; 988 struct i2c_adapter *adap;
973 struct resource *mem, *irq, *ioarea; 989 struct resource *mem, *irq, *ioarea;
974 struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data; 990 struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
991 struct device_node *node = pdev->dev.of_node;
992 const struct of_device_id *match;
975 irq_handler_t isr; 993 irq_handler_t isr;
976 int r; 994 int r;
977 u32 speed = 0;
978 995
979 /* NOTE: driver uses the static register mapping */ 996 /* NOTE: driver uses the static register mapping */
980 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 997 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1001,15 +1018,24 @@ omap_i2c_probe(struct platform_device *pdev)
1001 goto err_release_region; 1018 goto err_release_region;
1002 } 1019 }
1003 1020
1004 if (pdata != NULL) { 1021 match = of_match_device(omap_i2c_of_match, &pdev->dev);
1005 speed = pdata->clkrate; 1022 if (match) {
1023 u32 freq = 100000; /* default to 100000 Hz */
1024
1025 pdata = match->data;
1026 dev->dtrev = pdata->rev;
1027 dev->flags = pdata->flags;
1028
1029 of_property_read_u32(node, "clock-frequency", &freq);
1030 /* convert DT freq value in Hz into kHz for speed */
1031 dev->speed = freq / 1000;
1032 } else if (pdata != NULL) {
1033 dev->speed = pdata->clkrate;
1034 dev->flags = pdata->flags;
1006 dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; 1035 dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
1007 } else { 1036 dev->dtrev = pdata->rev;
1008 speed = 100; /* Default speed */
1009 dev->set_mpu_wkup_lat = NULL;
1010 } 1037 }
1011 1038
1012 dev->speed = speed;
1013 dev->dev = &pdev->dev; 1039 dev->dev = &pdev->dev;
1014 dev->irq = irq->start; 1040 dev->irq = irq->start;
1015 dev->base = ioremap(mem->start, resource_size(mem)); 1041 dev->base = ioremap(mem->start, resource_size(mem));
@@ -1020,9 +1046,9 @@ omap_i2c_probe(struct platform_device *pdev)
1020 1046
1021 platform_set_drvdata(pdev, dev); 1047 platform_set_drvdata(pdev, dev);
1022 1048
1023 dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3; 1049 dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
1024 1050
1025 if (pdata->rev == OMAP_I2C_IP_VERSION_2) 1051 if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
1026 dev->regs = (u8 *)reg_map_ip_v2; 1052 dev->regs = (u8 *)reg_map_ip_v2;
1027 else 1053 else
1028 dev->regs = (u8 *)reg_map_ip_v1; 1054 dev->regs = (u8 *)reg_map_ip_v1;
@@ -1035,7 +1061,7 @@ omap_i2c_probe(struct platform_device *pdev)
1035 if (dev->rev <= OMAP_I2C_REV_ON_3430) 1061 if (dev->rev <= OMAP_I2C_REV_ON_3430)
1036 dev->errata |= I2C_OMAP3_1P153; 1062 dev->errata |= I2C_OMAP3_1P153;
1037 1063
1038 if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) { 1064 if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
1039 u16 s; 1065 u16 s;
1040 1066
1041 /* Set up the fifo size - Get total size */ 1067 /* Set up the fifo size - Get total size */
@@ -1058,7 +1084,7 @@ omap_i2c_probe(struct platform_device *pdev)
1058 /* calculate wakeup latency constraint for MPU */ 1084 /* calculate wakeup latency constraint for MPU */
1059 if (dev->set_mpu_wkup_lat != NULL) 1085 if (dev->set_mpu_wkup_lat != NULL)
1060 dev->latency = (1000000 * dev->fifo_size) / 1086 dev->latency = (1000000 * dev->fifo_size) /
1061 (1000 * speed / 8); 1087 (1000 * dev->speed / 8);
1062 } 1088 }
1063 1089
1064 /* reset ASAP, clearing any IRQs */ 1090 /* reset ASAP, clearing any IRQs */
@@ -1074,7 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
1074 } 1100 }
1075 1101
1076 dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id, 1102 dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
1077 pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed); 1103 dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
1078 1104
1079 pm_runtime_put(dev->dev); 1105 pm_runtime_put(dev->dev);
1080 1106
@@ -1085,6 +1111,7 @@ omap_i2c_probe(struct platform_device *pdev)
1085 strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); 1111 strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
1086 adap->algo = &omap_i2c_algo; 1112 adap->algo = &omap_i2c_algo;
1087 adap->dev.parent = &pdev->dev; 1113 adap->dev.parent = &pdev->dev;
1114 adap->dev.of_node = pdev->dev.of_node;
1088 1115
1089 /* i2c device drivers may be active on return from add_adapter() */ 1116 /* i2c device drivers may be active on return from add_adapter() */
1090 adap->nr = pdev->id; 1117 adap->nr = pdev->id;
@@ -1094,6 +1121,8 @@ omap_i2c_probe(struct platform_device *pdev)
1094 goto err_free_irq; 1121 goto err_free_irq;
1095 } 1122 }
1096 1123
1124 of_i2c_register_devices(adap);
1125
1097 return 0; 1126 return 0;
1098 1127
1099err_free_irq: 1128err_free_irq:
@@ -1166,6 +1195,7 @@ static struct platform_driver omap_i2c_driver = {
1166 .name = "omap_i2c", 1195 .name = "omap_i2c",
1167 .owner = THIS_MODULE, 1196 .owner = THIS_MODULE,
1168 .pm = OMAP_I2C_PM_OPS, 1197 .pm = OMAP_I2C_PM_OPS,
1198 .of_match_table = of_match_ptr(omap_i2c_of_match),
1169 }, 1199 },
1170}; 1200};
1171 1201
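
The i2c-omap changes above move the per-SoC flags and bus speed out of platform data so the driver can also be probed from the device tree: an of_device_id table supplies default platform data through .data, and an optional clock-frequency property (in Hz) overrides the 100 kHz default before being converted to kHz. A kernel-style sketch of just that DT half; demo_bus_speed_khz() and its table argument are illustrative, not the driver's code:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Return the bus speed in kHz: 100 kHz unless the matched DT node
 * carries a "clock-frequency" property (given in Hz).
 */
static unsigned int demo_bus_speed_khz(struct platform_device *pdev,
				       const struct of_device_id *table)
{
	const struct of_device_id *match;
	u32 freq = 100000;	/* Hz */

	match = of_match_device(table, &pdev->dev);
	if (match)
		of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				     &freq);
	return freq / 1000;	/* the driver keeps the speed in kHz */
}
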
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5d2f8e13cf0e..20bce51c2e82 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
197 .enter = &intel_idle }, 197 .enter = &intel_idle },
198}; 198};
199 199
200static int get_driver_data(int cstate) 200static long get_driver_data(int cstate)
201{ 201{
202 int driver_data; 202 int driver_data;
203 switch (cstate) { 203 switch (cstate) {
@@ -232,6 +232,7 @@ static int get_driver_data(int cstate)
232 * @drv: cpuidle driver 232 * @drv: cpuidle driver
233 * @index: index of cpuidle state 233 * @index: index of cpuidle state
234 * 234 *
235 * Must be called under local_irq_disable().
235 */ 236 */
236static int intel_idle(struct cpuidle_device *dev, 237static int intel_idle(struct cpuidle_device *dev,
237 struct cpuidle_driver *drv, int index) 238 struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev,
247 248
248 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; 249 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
249 250
250 local_irq_disable();
251
252 /* 251 /*
253 * leave_mm() to avoid costly and often unnecessary wakeups 252 * leave_mm() to avoid costly and often unnecessary wakeups
254 * for flushing the user TLB's associated with the active mm. 253 * for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@ static int intel_idle_probe(void)
348 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); 347 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
349 348
350 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || 349 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
351 !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) 350 !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
351 !mwait_substates)
352 return -ENODEV; 352 return -ENODEV;
353 353
354 pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); 354 pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
394 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ 394 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
395 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; 395 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
396 else { 396 else {
397 smp_call_function(__setup_broadcast_timer, (void *)true, 1); 397 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
398 register_cpu_notifier(&setup_broadcast_notifier); 398 register_cpu_notifier(&setup_broadcast_notifier);
399 } 399 }
400 400
@@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void)
471 } 471 }
472 472
473 if (auto_demotion_disable_flags) 473 if (auto_demotion_disable_flags)
474 smp_call_function(auto_demotion_disable, NULL, 1); 474 on_each_cpu(auto_demotion_disable, NULL, 1);
475 475
476 return 0; 476 return 0;
477} 477}
478 478
479 479
480/* 480/*
481 * intel_idle_cpuidle_devices_init() 481 * intel_idle_cpu_init()
482 * allocate, initialize, register cpuidle_devices 482 * allocate, initialize, register cpuidle_devices
483 * @cpu: cpu/core to initialize
483 */ 484 */
484static int intel_idle_cpuidle_devices_init(void) 485int intel_idle_cpu_init(int cpu)
485{ 486{
486 int i, cstate; 487 int cstate;
487 struct cpuidle_device *dev; 488 struct cpuidle_device *dev;
488 489
489 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); 490 dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
490 if (intel_idle_cpuidle_devices == NULL)
491 return -ENOMEM;
492
493 for_each_online_cpu(i) {
494 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
495 491
496 dev->state_count = 1; 492 dev->state_count = 1;
497 493
498 for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { 494 for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
499 int num_substates; 495 int num_substates;
500 496
501 if (cstate > max_cstate) { 497 if (cstate > max_cstate) {
502 printk(PREFIX "max_cstate %d reached\n", 498 printk(PREFIX "max_cstate %d reached\n",
503 max_cstate); 499 max_cstate);
504 break; 500 break;
505 } 501 }
506 502
507 /* does the state exist in CPUID.MWAIT? */ 503 /* does the state exist in CPUID.MWAIT? */
508 num_substates = (mwait_substates >> ((cstate) * 4)) 504 num_substates = (mwait_substates >> ((cstate) * 4))
509 & MWAIT_SUBSTATE_MASK; 505 & MWAIT_SUBSTATE_MASK;
510 if (num_substates == 0) 506 if (num_substates == 0)
511 continue; 507 continue;
512 /* is the state not enabled? */ 508 /* is the state not enabled? */
513 if (cpuidle_state_table[cstate].enter == NULL) { 509 if (cpuidle_state_table[cstate].enter == NULL)
514 continue; 510 continue;
515 }
516 511
517 dev->states_usage[dev->state_count].driver_data = 512 dev->states_usage[dev->state_count].driver_data =
518 (void *)get_driver_data(cstate); 513 (void *)get_driver_data(cstate);
519 514
520 dev->state_count += 1; 515 dev->state_count += 1;
521 } 516 }
517 dev->cpu = cpu;
522 518
523 dev->cpu = i; 519 if (cpuidle_register_device(dev)) {
524 if (cpuidle_register_device(dev)) { 520 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
525 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", 521 intel_idle_cpuidle_devices_uninit();
526 i); 522 return -EIO;
527 intel_idle_cpuidle_devices_uninit();
528 return -EIO;
529 }
530 } 523 }
531 524
525 if (auto_demotion_disable_flags)
526 smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
527
532 return 0; 528 return 0;
533} 529}
534 530
535 531
536static int __init intel_idle_init(void) 532static int __init intel_idle_init(void)
537{ 533{
538 int retval; 534 int retval, i;
539 535
540 /* Do not load intel_idle at all for now if idle= is passed */ 536 /* Do not load intel_idle at all for now if idle= is passed */
541 if (boot_option_idle_override != IDLE_NO_OVERRIDE) 537 if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@ static int __init intel_idle_init(void)
553 return retval; 549 return retval;
554 } 550 }
555 551
556 retval = intel_idle_cpuidle_devices_init(); 552 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
557 if (retval) { 553 if (intel_idle_cpuidle_devices == NULL)
558 cpuidle_unregister_driver(&intel_idle_driver); 554 return -ENOMEM;
559 return retval; 555
556 for_each_online_cpu(i) {
557 retval = intel_idle_cpu_init(i);
558 if (retval) {
559 cpuidle_unregister_driver(&intel_idle_driver);
560 return retval;
561 }
560 } 562 }
561 563
562 return 0; 564 return 0;
@@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void)
568 cpuidle_unregister_driver(&intel_idle_driver); 570 cpuidle_unregister_driver(&intel_idle_driver);
569 571
570 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) { 572 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
571 smp_call_function(__setup_broadcast_timer, (void *)false, 1); 573 on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
572 unregister_cpu_notifier(&setup_broadcast_notifier); 574 unregister_cpu_notifier(&setup_broadcast_notifier);
573 } 575 }
574 576
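
Beyond switching to on_each_cpu() so the calling CPU is included, the intel_idle rework above splits device registration into a per-CPU helper, intel_idle_cpu_init(), that fills in one slot of the alloc_percpu() array at a time. A kernel-style sketch of that shape; struct demo_dev and the demo_* functions are placeholders for the cpuidle types and calls:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>

struct demo_dev {
	int cpu;
	int state_count;
};

static struct demo_dev __percpu *demo_devices;

/* Initialise (and, in the real driver, register) the device of one CPU. */
static int demo_cpu_init(int cpu)
{
	struct demo_dev *dev = per_cpu_ptr(demo_devices, cpu);

	dev->cpu = cpu;
	dev->state_count = 1;
	return 0;
}

static int __init demo_init(void)
{
	int i, ret;

	demo_devices = alloc_percpu(struct demo_dev);
	if (!demo_devices)
		return -ENOMEM;

	for_each_online_cpu(i) {
		ret = demo_cpu_init(i);
		if (ret)
			return ret;
	}
	return 0;
}
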
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 0f9a84c1046a..eb0add311dc8 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig"
55source "drivers/infiniband/ulp/ipoib/Kconfig" 55source "drivers/infiniband/ulp/ipoib/Kconfig"
56 56
57source "drivers/infiniband/ulp/srp/Kconfig" 57source "drivers/infiniband/ulp/srp/Kconfig"
58source "drivers/infiniband/ulp/srpt/Kconfig"
58 59
59source "drivers/infiniband/ulp/iser/Kconfig" 60source "drivers/infiniband/ulp/iser/Kconfig"
60 61
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 9cc7a47d3e67..a3b2d8eac86e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
10obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ 10obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
11obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 11obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
12obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ 12obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
13obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
13obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ 14obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 000000000000..31ee83d528d9
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,12 @@
1config INFINIBAND_SRPT
2 tristate "InfiniBand SCSI RDMA Protocol target support"
3 depends on INFINIBAND && TARGET_CORE
4 ---help---
5
6 Support for the SCSI RDMA Protocol (SRP) Target driver. The
7 SRP protocol is a protocol that allows an initiator to access
8 a block storage device on another host (target) over a network
9 that supports the RDMA protocol. Currently the RDMA protocol is
10 supported by InfiniBand and by iWarp network hardware. More
11 information about the SRP protocol can be found on the website
12 of the INCITS T10 technical committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 000000000000..e3ee4bdfffa5
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
1ccflags-y := -Idrivers/target
2obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 000000000000..fb1de1f6f297
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef IB_DM_MAD_H
35#define IB_DM_MAD_H
36
37#include <linux/types.h>
38
39#include <rdma/ib_mad.h>
40
41enum {
42 /*
43 * See also section 13.4.7 Status Field, table 115 MAD Common Status
44 * Field Bit Values and also section 16.3.1.1 Status Field in the
45 * InfiniBand Architecture Specification.
46 */
47 DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
48 DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
49 DM_MAD_STATUS_INVALID_FIELD = 0x001c,
50 DM_MAD_STATUS_NO_IOC = 0x0100,
51
52 /*
53 * See also the Device Management chapter, section 16.3.3 Attributes,
54 * table 279 Device Management Attributes in the InfiniBand
55 * Architecture Specification.
56 */
57 DM_ATTR_CLASS_PORT_INFO = 0x01,
58 DM_ATTR_IOU_INFO = 0x10,
59 DM_ATTR_IOC_PROFILE = 0x11,
60 DM_ATTR_SVC_ENTRIES = 0x12
61};
62
63struct ib_dm_hdr {
64 u8 reserved[28];
65};
66
67/*
68 * Structure of management datagram sent by the SRP target implementation.
69 * Contains a management datagram header, reliable multi-packet transaction
70 * protocol (RMPP) header and ib_dm_hdr. Notes:
71 * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
72 * management datagrams.
73 * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
74 * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
75 * - The maximum supported size for a management datagram when not using RMPP
76 * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
77 */
78struct ib_dm_mad {
79 struct ib_mad_hdr mad_hdr;
80 struct ib_rmpp_hdr rmpp_hdr;
81 struct ib_dm_hdr dm_hdr;
82 u8 data[IB_MGMT_DEVICE_DATA];
83};
84
85/*
86 * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
87 * Architecture Specification.
88 */
89struct ib_dm_iou_info {
90 __be16 change_id;
91 u8 max_controllers;
92 u8 op_rom;
93 u8 controller_list[128];
94};
95
96/*
97 * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
98 * the InfiniBand Architecture Specification.
99 */
100struct ib_dm_ioc_profile {
101 __be64 guid;
102 __be32 vendor_id;
103 __be32 device_id;
104 __be16 device_version;
105 __be16 reserved1;
106 __be32 subsys_vendor_id;
107 __be32 subsys_device_id;
108 __be16 io_class;
109 __be16 io_subclass;
110 __be16 protocol;
111 __be16 protocol_version;
112 __be16 service_conn;
113 __be16 initiators_supported;
114 __be16 send_queue_depth;
115 u8 reserved2;
116 u8 rdma_read_depth;
117 __be32 send_size;
118 __be32 rdma_size;
119 u8 op_cap_mask;
120 u8 svc_cap_mask;
121 u8 num_svc_entries;
122 u8 reserved3[9];
123 u8 id_string[64];
124};
125
126struct ib_dm_svc_entry {
127 u8 name[40];
128 __be64 id;
129};
130
131/*
132 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
133 * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
134 */
135struct ib_dm_svc_entries {
136 struct ib_dm_svc_entry service_entries[4];
137};
138
139#endif
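
The layout notes above pin down the arithmetic behind struct ib_dm_mad: the three headers in front of data[] must add up to the 64-byte IB_MGMT_DEVICE_HDR, which leaves 192 bytes (IB_MGMT_DEVICE_DATA) of payload in a 256-byte non-RMPP MAD. A stand-alone check with the wire sizes written out by hand (24-byte MAD header, 12-byte RMPP header, 28 reserved bytes):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	const size_t mad_hdr  = 24;	/* struct ib_mad_hdr on the wire */
	const size_t rmpp_hdr = 12;	/* struct ib_rmpp_hdr on the wire */
	const size_t dm_hdr   = 28;	/* struct ib_dm_hdr: u8 reserved[28] */
	const size_t total    = 256;	/* maximum MAD size without RMPP */

	printf("header  = %zu (expect 64)\n", mad_hdr + rmpp_hdr + dm_hdr);
	printf("payload = %zu (expect 192)\n",
	       total - (mad_hdr + rmpp_hdr + dm_hdr));
	return 0;
}
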
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 000000000000..cd5d05e22a77
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,4073 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/ctype.h>
40#include <linux/kthread.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/atomic.h>
44#include <scsi/scsi_tcq.h>
45#include <target/configfs_macros.h>
46#include <target/target_core_base.h>
47#include <target/target_core_fabric_configfs.h>
48#include <target/target_core_fabric.h>
49#include <target/target_core_configfs.h>
50#include "ib_srpt.h"
51
52/* Name of this kernel module. */
53#define DRV_NAME "ib_srpt"
54#define DRV_VERSION "2.0.0"
55#define DRV_RELDATE "2011-02-14"
56
57#define SRPT_ID_STRING "Linux SRP target"
58
59#undef pr_fmt
60#define pr_fmt(fmt) DRV_NAME " " fmt
61
62MODULE_AUTHOR("Vu Pham and Bart Van Assche");
63MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
64 "v" DRV_VERSION " (" DRV_RELDATE ")");
65MODULE_LICENSE("Dual BSD/GPL");
66
67/*
68 * Global Variables
69 */
70
71static u64 srpt_service_guid;
72static spinlock_t srpt_dev_lock; /* Protects srpt_dev_list. */
73static struct list_head srpt_dev_list; /* List of srpt_device structures. */
74
75static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
76module_param(srp_max_req_size, int, 0444);
77MODULE_PARM_DESC(srp_max_req_size,
78 "Maximum size of SRP request messages in bytes.");
79
80static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
81module_param(srpt_srq_size, int, 0444);
82MODULE_PARM_DESC(srpt_srq_size,
83 "Shared receive queue (SRQ) size.");
84
85static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
86{
87 return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
88}
89module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
90 0444);
91MODULE_PARM_DESC(srpt_service_guid,
92 "Using this value for ioc_guid, id_ext, and cm_listen_id"
93 " instead of using the node_guid of the first HCA.");
94
95static struct ib_client srpt_client;
96static struct target_fabric_configfs *srpt_target;
97static void srpt_release_channel(struct srpt_rdma_ch *ch);
98static int srpt_queue_status(struct se_cmd *cmd);
99
100/**
101 * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
102 */
103static inline
104enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
105{
106 switch (dir) {
107 case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
108 case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
109 default: return dir;
110 }
111}
112
113/**
114 * srpt_sdev_name() - Return the name associated with the HCA.
115 *
116 * Examples are ib0, ib1, ...
117 */
118static inline const char *srpt_sdev_name(struct srpt_device *sdev)
119{
120 return sdev->device->name;
121}
122
123static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
124{
125 unsigned long flags;
126 enum rdma_ch_state state;
127
128 spin_lock_irqsave(&ch->spinlock, flags);
129 state = ch->state;
130 spin_unlock_irqrestore(&ch->spinlock, flags);
131 return state;
132}
133
134static enum rdma_ch_state
135srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
136{
137 unsigned long flags;
138 enum rdma_ch_state prev;
139
140 spin_lock_irqsave(&ch->spinlock, flags);
141 prev = ch->state;
142 ch->state = new_state;
143 spin_unlock_irqrestore(&ch->spinlock, flags);
144 return prev;
145}
146
147/**
148 * srpt_test_and_set_ch_state() - Test and set the channel state.
149 *
150 * Returns true if and only if the channel state has been set to the new state.
151 */
152static bool
153srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
154 enum rdma_ch_state new)
155{
156 unsigned long flags;
157 enum rdma_ch_state prev;
158
159 spin_lock_irqsave(&ch->spinlock, flags);
160 prev = ch->state;
161 if (prev == old)
162 ch->state = new;
163 spin_unlock_irqrestore(&ch->spinlock, flags);
164 return prev == old;
165}
166
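
srpt_set_ch_state() and srpt_test_and_set_ch_state() above give the channel state machine compare-and-swap semantics behind a spinlock. For illustration only (not a suggested change to the driver), the same contract can be written with a C11 atomic:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int ch_state;

/* Returns true if and only if the state was 'old' and is now 'new'. */
static bool test_and_set_state(int old, int new)
{
	return atomic_compare_exchange_strong(&ch_state, &old, new);
}

int main(void)
{
	ch_state = 0;					/* e.g. a "connecting" state */
	printf("%d\n", test_and_set_state(0, 1));	/* 1: transition taken */
	printf("%d\n", test_and_set_state(0, 2));	/* 0: state is no longer 0 */
	return 0;
}
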
167/**
168 * srpt_event_handler() - Asynchronous IB event callback function.
169 *
170 * Callback function called by the InfiniBand core when an asynchronous IB
171 * event occurs. This callback may occur in interrupt context. See also
172 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
173 * Architecture Specification.
174 */
175static void srpt_event_handler(struct ib_event_handler *handler,
176 struct ib_event *event)
177{
178 struct srpt_device *sdev;
179 struct srpt_port *sport;
180
181 sdev = ib_get_client_data(event->device, &srpt_client);
182 if (!sdev || sdev->device != event->device)
183 return;
184
185 pr_debug("ASYNC event= %d on device= %s\n", event->event,
186 srpt_sdev_name(sdev));
187
188 switch (event->event) {
189 case IB_EVENT_PORT_ERR:
190 if (event->element.port_num <= sdev->device->phys_port_cnt) {
191 sport = &sdev->port[event->element.port_num - 1];
192 sport->lid = 0;
193 sport->sm_lid = 0;
194 }
195 break;
196 case IB_EVENT_PORT_ACTIVE:
197 case IB_EVENT_LID_CHANGE:
198 case IB_EVENT_PKEY_CHANGE:
199 case IB_EVENT_SM_CHANGE:
200 case IB_EVENT_CLIENT_REREGISTER:
201 /* Refresh port data asynchronously. */
202 if (event->element.port_num <= sdev->device->phys_port_cnt) {
203 sport = &sdev->port[event->element.port_num - 1];
204 if (!sport->lid && !sport->sm_lid)
205 schedule_work(&sport->work);
206 }
207 break;
208 default:
209 printk(KERN_ERR "received unrecognized IB event %d\n",
210 event->event);
211 break;
212 }
213}
214
215/**
216 * srpt_srq_event() - SRQ event callback function.
217 */
218static void srpt_srq_event(struct ib_event *event, void *ctx)
219{
220 printk(KERN_INFO "SRQ event %d\n", event->event);
221}
222
223/**
224 * srpt_qp_event() - QP event callback function.
225 */
226static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
227{
228 pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
229 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
230
231 switch (event->event) {
232 case IB_EVENT_COMM_EST:
233 ib_cm_notify(ch->cm_id, event->event);
234 break;
235 case IB_EVENT_QP_LAST_WQE_REACHED:
236 if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
237 CH_RELEASING))
238 srpt_release_channel(ch);
239 else
240 pr_debug("%s: state %d - ignored LAST_WQE.\n",
241 ch->sess_name, srpt_get_ch_state(ch));
242 break;
243 default:
244 printk(KERN_ERR "received unrecognized IB QP event %d\n",
245 event->event);
246 break;
247 }
248}
249
250/**
251 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
252 *
253 * @slot: one-based slot number.
254 * @value: four-bit value.
255 *
256 * Copies the lowest four bits of value in element slot of the array of four
257 * bit elements called c_list (controller list). The index slot is one-based.
258 */
259static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
260{
261 u16 id;
262 u8 tmp;
263
264 id = (slot - 1) / 2;
265 if (slot & 0x1) {
266 tmp = c_list[id] & 0xf;
267 c_list[id] = (value << 4) | tmp;
268 } else {
269 tmp = c_list[id] & 0xf0;
270 c_list[id] = (value & 0xf) | tmp;
271 }
272}
273
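
srpt_set_ioc() above packs four-bit controller-list entries two to a byte, with the one-based slot selecting the byte and odd slots taking the high nibble. A stand-alone copy of that packing, small enough to run and check:

#include <stdio.h>

/* Same logic as srpt_set_ioc(): slot is one-based, two slots per byte,
 * odd slots in the high nibble, even slots in the low nibble.
 */
static void set_ioc(unsigned char *c_list, unsigned int slot,
		    unsigned char value)
{
	unsigned int id = (slot - 1) / 2;

	if (slot & 0x1)
		c_list[id] = (value << 4) | (c_list[id] & 0x0f);
	else
		c_list[id] = (value & 0x0f) | (c_list[id] & 0xf0);
}

int main(void)
{
	unsigned char list[2] = { 0, 0 };

	set_ioc(list, 1, 0x1);	/* slot 1 -> high nibble of byte 0 */
	set_ioc(list, 2, 0x2);	/* slot 2 -> low nibble of byte 0 */
	set_ioc(list, 3, 0x3);	/* slot 3 -> high nibble of byte 1 */
	printf("%02x %02x\n", list[0], list[1]);	/* prints 12 30 */
	return 0;
}
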
274/**
275 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
276 *
277 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
278 * Specification.
279 */
280static void srpt_get_class_port_info(struct ib_dm_mad *mad)
281{
282 struct ib_class_port_info *cif;
283
284 cif = (struct ib_class_port_info *)mad->data;
285 memset(cif, 0, sizeof *cif);
286 cif->base_version = 1;
287 cif->class_version = 1;
288 cif->resp_time_value = 20;
289
290 mad->mad_hdr.status = 0;
291}
292
293/**
294 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
295 *
296 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
297 * Specification. See also section B.7, table B.6 in the SRP r16a document.
298 */
299static void srpt_get_iou(struct ib_dm_mad *mad)
300{
301 struct ib_dm_iou_info *ioui;
302 u8 slot;
303 int i;
304
305 ioui = (struct ib_dm_iou_info *)mad->data;
306 ioui->change_id = __constant_cpu_to_be16(1);
307 ioui->max_controllers = 16;
308
309 /* set present for slot 1 and empty for the rest */
310 srpt_set_ioc(ioui->controller_list, 1, 1);
311 for (i = 1, slot = 2; i < 16; i++, slot++)
312 srpt_set_ioc(ioui->controller_list, slot, 0);
313
314 mad->mad_hdr.status = 0;
315}
316
317/**
318 * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
319 *
320 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
321 * Architecture Specification. See also section B.7, table B.7 in the SRP
322 * r16a document.
323 */
324static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
325 struct ib_dm_mad *mad)
326{
327 struct srpt_device *sdev = sport->sdev;
328 struct ib_dm_ioc_profile *iocp;
329
330 iocp = (struct ib_dm_ioc_profile *)mad->data;
331
332 if (!slot || slot > 16) {
333 mad->mad_hdr.status
334 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
335 return;
336 }
337
338 if (slot > 2) {
339 mad->mad_hdr.status
340 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
341 return;
342 }
343
344 memset(iocp, 0, sizeof *iocp);
345 strcpy(iocp->id_string, SRPT_ID_STRING);
346 iocp->guid = cpu_to_be64(srpt_service_guid);
347 iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
348 iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
349 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
350 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
351 iocp->subsys_device_id = 0x0;
352 iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
353 iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
354 iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
355 iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
356 iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
357 iocp->rdma_read_depth = 4;
358 iocp->send_size = cpu_to_be32(srp_max_req_size);
359 iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
360 1U << 24));
361 iocp->num_svc_entries = 1;
362 iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
363 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
364
365 mad->mad_hdr.status = 0;
366}
367
368/**
369 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
370 *
371 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
372 * Specification. See also section B.7, table B.8 in the SRP r16a document.
373 */
374static void srpt_get_svc_entries(u64 ioc_guid,
375 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
376{
377 struct ib_dm_svc_entries *svc_entries;
378
379 WARN_ON(!ioc_guid);
380
381 if (!slot || slot > 16) {
382 mad->mad_hdr.status
383 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
384 return;
385 }
386
387 if (slot > 2 || lo > hi || hi > 1) {
388 mad->mad_hdr.status
389 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
390 return;
391 }
392
393 svc_entries = (struct ib_dm_svc_entries *)mad->data;
394 memset(svc_entries, 0, sizeof *svc_entries);
395 svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
396 snprintf(svc_entries->service_entries[0].name,
397 sizeof(svc_entries->service_entries[0].name),
398 "%s%016llx",
399 SRP_SERVICE_NAME_PREFIX,
400 ioc_guid);
401
402 mad->mad_hdr.status = 0;
403}
404
405/**
406 * srpt_mgmt_method_get() - Process a received management datagram.
407 * @sp: source port through which the MAD has been received.
408 * @rq_mad: received MAD.
409 * @rsp_mad: response MAD.
410 */
411static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
412 struct ib_dm_mad *rsp_mad)
413{
414 u16 attr_id;
415 u32 slot;
416 u8 hi, lo;
417
418 attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
419 switch (attr_id) {
420 case DM_ATTR_CLASS_PORT_INFO:
421 srpt_get_class_port_info(rsp_mad);
422 break;
423 case DM_ATTR_IOU_INFO:
424 srpt_get_iou(rsp_mad);
425 break;
426 case DM_ATTR_IOC_PROFILE:
427 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
428 srpt_get_ioc(sp, slot, rsp_mad);
429 break;
430 case DM_ATTR_SVC_ENTRIES:
431 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
432 hi = (u8) ((slot >> 8) & 0xff);
433 lo = (u8) (slot & 0xff);
434 slot = (u16) ((slot >> 16) & 0xffff);
435 srpt_get_svc_entries(srpt_service_guid,
436 slot, hi, lo, rsp_mad);
437 break;
438 default:
439 rsp_mad->mad_hdr.status =
440 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
441 break;
442 }
443}
444
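
In the DM_ATTR_SVC_ENTRIES case of srpt_mgmt_method_get() above, the 32-bit attribute modifier carries three fields at once: bits 31..16 hold the controller slot, bits 15..8 the highest and bits 7..0 the lowest requested service-entry index. A stand-alone decode of an example value (already converted to host byte order):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t attr_mod = 0x00010100;	/* slot 1, hi 1, lo 0 */
	uint8_t hi = (attr_mod >> 8) & 0xff;
	uint8_t lo = attr_mod & 0xff;
	uint16_t slot = (attr_mod >> 16) & 0xffff;

	printf("slot=%u hi=%u lo=%u\n", slot, hi, lo);	/* slot=1 hi=1 lo=0 */
	return 0;
}
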
445/**
446 * srpt_mad_send_handler() - Post MAD-send callback function.
447 */
448static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
449 struct ib_mad_send_wc *mad_wc)
450{
451 ib_destroy_ah(mad_wc->send_buf->ah);
452 ib_free_send_mad(mad_wc->send_buf);
453}
454
455/**
456 * srpt_mad_recv_handler() - MAD reception callback function.
457 */
458static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
459 struct ib_mad_recv_wc *mad_wc)
460{
461 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
462 struct ib_ah *ah;
463 struct ib_mad_send_buf *rsp;
464 struct ib_dm_mad *dm_mad;
465
466 if (!mad_wc || !mad_wc->recv_buf.mad)
467 return;
468
469 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
470 mad_wc->recv_buf.grh, mad_agent->port_num);
471 if (IS_ERR(ah))
472 goto err;
473
474 BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
475
476 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
477 mad_wc->wc->pkey_index, 0,
478 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
479 GFP_KERNEL);
480 if (IS_ERR(rsp))
481 goto err_rsp;
482
483 rsp->ah = ah;
484
485 dm_mad = rsp->mad;
486 memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
487 dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
488 dm_mad->mad_hdr.status = 0;
489
490 switch (mad_wc->recv_buf.mad->mad_hdr.method) {
491 case IB_MGMT_METHOD_GET:
492 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
493 break;
494 case IB_MGMT_METHOD_SET:
495 dm_mad->mad_hdr.status =
496 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
497 break;
498 default:
499 dm_mad->mad_hdr.status =
500 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
501 break;
502 }
503
504 if (!ib_post_send_mad(rsp, NULL)) {
505 ib_free_recv_mad(mad_wc);
506 /* will destroy_ah & free_send_mad in send completion */
507 return;
508 }
509
510 ib_free_send_mad(rsp);
511
512err_rsp:
513 ib_destroy_ah(ah);
514err:
515 ib_free_recv_mad(mad_wc);
516}
517
518/**
519 * srpt_refresh_port() - Configure a HCA port.
520 *
521 * Enable InfiniBand management datagram processing, update the cached sm_lid,
522 * lid and gid values, and register a callback function for processing MADs
523 * on the specified port.
524 *
525 * Note: It is safe to call this function more than once for the same port.
526 */
527static int srpt_refresh_port(struct srpt_port *sport)
528{
529 struct ib_mad_reg_req reg_req;
530 struct ib_port_modify port_modify;
531 struct ib_port_attr port_attr;
532 int ret;
533
534 memset(&port_modify, 0, sizeof port_modify);
535 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
536 port_modify.clr_port_cap_mask = 0;
537
538 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
539 if (ret)
540 goto err_mod_port;
541
542 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
543 if (ret)
544 goto err_query_port;
545
546 sport->sm_lid = port_attr.sm_lid;
547 sport->lid = port_attr.lid;
548
549 ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
550 if (ret)
551 goto err_query_port;
552
553 if (!sport->mad_agent) {
554 memset(&reg_req, 0, sizeof reg_req);
555 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
556 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
557 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
558 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
559
560 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
561 sport->port,
562 IB_QPT_GSI,
563 &reg_req, 0,
564 srpt_mad_send_handler,
565 srpt_mad_recv_handler,
566 sport);
567 if (IS_ERR(sport->mad_agent)) {
568 ret = PTR_ERR(sport->mad_agent);
569 sport->mad_agent = NULL;
570 goto err_query_port;
571 }
572 }
573
574 return 0;
575
576err_query_port:
577
578 port_modify.set_port_cap_mask = 0;
579 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
580 ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
581
582err_mod_port:
583
584 return ret;
585}
586
587/**
588 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
589 *
590 * Note: It is safe to call this function more than once for the same device.
591 */
592static void srpt_unregister_mad_agent(struct srpt_device *sdev)
593{
594 struct ib_port_modify port_modify = {
595 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
596 };
597 struct srpt_port *sport;
598 int i;
599
600 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
601 sport = &sdev->port[i - 1];
602 WARN_ON(sport->port != i);
603 if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
604 printk(KERN_ERR "disabling MAD processing failed.\n");
605 if (sport->mad_agent) {
606 ib_unregister_mad_agent(sport->mad_agent);
607 sport->mad_agent = NULL;
608 }
609 }
610}
611
612/**
613 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
614 */
615static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
616 int ioctx_size, int dma_size,
617 enum dma_data_direction dir)
618{
619 struct srpt_ioctx *ioctx;
620
621 ioctx = kmalloc(ioctx_size, GFP_KERNEL);
622 if (!ioctx)
623 goto err;
624
625 ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
626 if (!ioctx->buf)
627 goto err_free_ioctx;
628
629 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
630 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
631 goto err_free_buf;
632
633 return ioctx;
634
635err_free_buf:
636 kfree(ioctx->buf);
637err_free_ioctx:
638 kfree(ioctx);
639err:
640 return NULL;
641}
642
643/**
644 * srpt_free_ioctx() - Free an SRPT I/O context structure.
645 */
646static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
647 int dma_size, enum dma_data_direction dir)
648{
649 if (!ioctx)
650 return;
651
652 ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
653 kfree(ioctx->buf);
654 kfree(ioctx);
655}
656
657/**
658 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
659 * @sdev: Device to allocate the I/O context ring for.
660 * @ring_size: Number of elements in the I/O context ring.
661 * @ioctx_size: I/O context size.
662 * @dma_size: DMA buffer size.
663 * @dir: DMA data direction.
664 */
665static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
666 int ring_size, int ioctx_size,
667 int dma_size, enum dma_data_direction dir)
668{
669 struct srpt_ioctx **ring;
670 int i;
671
672 WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
673 && ioctx_size != sizeof(struct srpt_send_ioctx));
674
675 ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
676 if (!ring)
677 goto out;
678 for (i = 0; i < ring_size; ++i) {
679 ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
680 if (!ring[i])
681 goto err;
682 ring[i]->index = i;
683 }
684 goto out;
685
686err:
687 while (--i >= 0)
688 srpt_free_ioctx(sdev, ring[i], dma_size, dir);
689 kfree(ring);
690out:
691 return ring;
692}
693
694/**
695 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
696 */
697static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
698 struct srpt_device *sdev, int ring_size,
699 int dma_size, enum dma_data_direction dir)
700{
701 int i;
702
703 for (i = 0; i < ring_size; ++i)
704 srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
705 kfree(ioctx_ring);
706}
707
708/**
709 * srpt_get_cmd_state() - Get the state of a SCSI command.
710 */
711static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
712{
713 enum srpt_command_state state;
714 unsigned long flags;
715
716 BUG_ON(!ioctx);
717
718 spin_lock_irqsave(&ioctx->spinlock, flags);
719 state = ioctx->state;
720 spin_unlock_irqrestore(&ioctx->spinlock, flags);
721 return state;
722}
723
724/**
725 * srpt_set_cmd_state() - Set the state of a SCSI command.
726 *
727 * Does not modify the state of aborted commands. Returns the previous command
728 * state.
729 */
730static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
731 enum srpt_command_state new)
732{
733 enum srpt_command_state previous;
734 unsigned long flags;
735
736 BUG_ON(!ioctx);
737
738 spin_lock_irqsave(&ioctx->spinlock, flags);
739 previous = ioctx->state;
740 if (previous != SRPT_STATE_DONE)
741 ioctx->state = new;
742 spin_unlock_irqrestore(&ioctx->spinlock, flags);
743
744 return previous;
745}
746
747/**
748 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
749 *
750 * Returns true if and only if the previous command state was equal to 'old'.
751 */
752static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
753 enum srpt_command_state old,
754 enum srpt_command_state new)
755{
756 enum srpt_command_state previous;
757 unsigned long flags;
758
759 WARN_ON(!ioctx);
760 WARN_ON(old == SRPT_STATE_DONE);
761 WARN_ON(new == SRPT_STATE_NEW);
762
763 spin_lock_irqsave(&ioctx->spinlock, flags);
764 previous = ioctx->state;
765 if (previous == old)
766 ioctx->state = new;
767 spin_unlock_irqrestore(&ioctx->spinlock, flags);
768 return previous == old;
769}
770
771/**
772 * srpt_post_recv() - Post an IB receive request.
773 */
774static int srpt_post_recv(struct srpt_device *sdev,
775 struct srpt_recv_ioctx *ioctx)
776{
777 struct ib_sge list;
778 struct ib_recv_wr wr, *bad_wr;
779
780 BUG_ON(!sdev);
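/*
 * Encode the opcode and the I/O context index in the work request ID so
 * that the completion handler can recover both from the completion.
 */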
781 wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
782
783 list.addr = ioctx->ioctx.dma;
784 list.length = srp_max_req_size;
785 list.lkey = sdev->mr->lkey;
786
787 wr.next = NULL;
788 wr.sg_list = &list;
789 wr.num_sge = 1;
790
791 return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
792}
793
794/**
795 * srpt_post_send() - Post an IB send request.
796 *
797 * Returns zero upon success and a non-zero value upon failure.
798 */
799static int srpt_post_send(struct srpt_rdma_ch *ch,
800 struct srpt_send_ioctx *ioctx, int len)
801{
802 struct ib_sge list;
803 struct ib_send_wr wr, *bad_wr;
804 struct srpt_device *sdev = ch->sport->sdev;
805 int ret;
806
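/*
 * Account for this response in the request limit and in the available send
 * queue space; both updates are undone at the "out" label if posting the
 * send work request fails.
 */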
807 atomic_inc(&ch->req_lim);
808
809 ret = -ENOMEM;
810 if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
811 printk(KERN_WARNING "IB send queue full (needed 1)\n");
812 goto out;
813 }
814
815 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
816 DMA_TO_DEVICE);
817
818 list.addr = ioctx->ioctx.dma;
819 list.length = len;
820 list.lkey = sdev->mr->lkey;
821
822 wr.next = NULL;
823 wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
824 wr.sg_list = &list;
825 wr.num_sge = 1;
826 wr.opcode = IB_WR_SEND;
827 wr.send_flags = IB_SEND_SIGNALED;
828
829 ret = ib_post_send(ch->qp, &wr, &bad_wr);
830
831out:
832 if (ret < 0) {
833 atomic_inc(&ch->sq_wr_avail);
834 atomic_dec(&ch->req_lim);
835 }
836 return ret;
837}
838
839/**
840 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
841 * @ioctx: Pointer to the I/O context associated with the request.
842 * @srp_cmd: Pointer to the SRP_CMD request data.
843 * @dir: Pointer to the variable to which the transfer direction will be
844 * written.
845 * @data_len: Pointer to the variable to which the total data length of all
846 * descriptors in the SRP_CMD request will be written.
847 *
848 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
849 *
850 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
851 * -ENOMEM when memory allocation fails and zero upon success.
852 */
853static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
854 struct srp_cmd *srp_cmd,
855 enum dma_data_direction *dir, u64 *data_len)
856{
857 struct srp_indirect_buf *idb;
858 struct srp_direct_buf *db;
859 unsigned add_cdb_offset;
860 int ret;
861
862 /*
863 * The pointer computations below will only be compiled correctly
864 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
865 * whether srp_cmd::add_data has been declared as a byte pointer.
866 */
867 BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
868 && !__same_type(srp_cmd->add_data[0], (u8)0));
869
870 BUG_ON(!dir);
871 BUG_ON(!data_len);
872
873 ret = 0;
874 *data_len = 0;
875
876 /*
877 * The lower four bits of the buffer format field contain the DATA-IN
878 * buffer descriptor format, and the highest four bits contain the
879 * DATA-OUT buffer descriptor format.
880 */
881 *dir = DMA_NONE;
882 if (srp_cmd->buf_fmt & 0xf)
883 /* DATA-IN: transfer data from target to initiator (read). */
884 *dir = DMA_FROM_DEVICE;
885 else if (srp_cmd->buf_fmt >> 4)
886 /* DATA-OUT: transfer data from initiator to target (write). */
887 *dir = DMA_TO_DEVICE;
888
889 /*
890 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
891 * CDB LENGTH' field are reserved and the size in bytes of this field
892 * is four times the value specified in bits 3..7. Hence the "& ~3".
893 */
894 add_cdb_offset = srp_cmd->add_cdb_len & ~3;
895 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
896 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
897 ioctx->n_rbuf = 1;
898 ioctx->rbufs = &ioctx->single_rbuf;
899
900 db = (struct srp_direct_buf *)(srp_cmd->add_data
901 + add_cdb_offset);
902 memcpy(ioctx->rbufs, db, sizeof *db);
903 *data_len = be32_to_cpu(db->len);
904 } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
905 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
906 idb = (struct srp_indirect_buf *)(srp_cmd->add_data
907 + add_cdb_offset);
908
909 ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
910
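/*
 * Verify that the descriptor count derived from the table length does not
 * exceed the descriptor counts reported in the SRP_CMD request itself.
 */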
911 if (ioctx->n_rbuf >
912 (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
913 printk(KERN_ERR "received unsupported SRP_CMD request"
914 " type (%u out + %u in != %u / %zu)\n",
915 srp_cmd->data_out_desc_cnt,
916 srp_cmd->data_in_desc_cnt,
917 be32_to_cpu(idb->table_desc.len),
918 sizeof(*db));
919 ioctx->n_rbuf = 0;
920 ret = -EINVAL;
921 goto out;
922 }
923
924 if (ioctx->n_rbuf == 1)
925 ioctx->rbufs = &ioctx->single_rbuf;
926 else {
927 ioctx->rbufs =
928 kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
929 if (!ioctx->rbufs) {
930 ioctx->n_rbuf = 0;
931 ret = -ENOMEM;
932 goto out;
933 }
934 }
935
936 db = idb->desc_list;
937 memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
938 *data_len = be32_to_cpu(idb->len);
939 }
940out:
941 return ret;
942}
943
944/**
945 * srpt_init_ch_qp() - Initialize queue pair attributes.
946 *
947 * Initializes the attributes of queue pair 'qp' by allowing local write,
948 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
949 */
950static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
951{
952 struct ib_qp_attr *attr;
953 int ret;
954
955 attr = kzalloc(sizeof *attr, GFP_KERNEL);
956 if (!attr)
957 return -ENOMEM;
958
959 attr->qp_state = IB_QPS_INIT;
960 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
961 IB_ACCESS_REMOTE_WRITE;
962 attr->port_num = ch->sport->port;
963 attr->pkey_index = 0;
964
965 ret = ib_modify_qp(qp, attr,
966 IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
967 IB_QP_PKEY_INDEX);
968
969 kfree(attr);
970 return ret;
971}
972
973/**
974 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
975 * @ch: channel of the queue pair.
976 * @qp: queue pair to change the state of.
977 *
978 * Returns zero upon success and a negative value upon failure.
979 *
980 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
981 * If this structure ever becomes larger, it might be necessary to allocate
982 * it dynamically instead of on the stack.
983 */
984static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
985{
986 struct ib_qp_attr qp_attr;
987 int attr_mask;
988 int ret;
989
990 qp_attr.qp_state = IB_QPS_RTR;
991 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
992 if (ret)
993 goto out;
994
995 qp_attr.max_dest_rd_atomic = 4;
996
997 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
998
999out:
1000 return ret;
1001}
1002
1003/**
1004 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
1005 * @ch: channel of the queue pair.
1006 * @qp: queue pair to change the state of.
1007 *
1008 * Returns zero upon success and a negative value upon failure.
1009 *
1010 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
1011 * If this structure ever becomes larger, it might be necessary to allocate
1012 * it dynamically instead of on the stack.
1013 */
1014static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1015{
1016 struct ib_qp_attr qp_attr;
1017 int attr_mask;
1018 int ret;
1019
1020 qp_attr.qp_state = IB_QPS_RTS;
1021 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
1022 if (ret)
1023 goto out;
1024
1025 qp_attr.max_rd_atomic = 4;
1026
1027 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1028
1029out:
1030 return ret;
1031}
1032
1033/**
1034 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
1035 */
1036static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1037{
1038 struct ib_qp_attr qp_attr;
1039
1040 qp_attr.qp_state = IB_QPS_ERR;
1041 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1042}
1043
1044/**
1045 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
1046 */
1047static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1048 struct srpt_send_ioctx *ioctx)
1049{
1050 struct scatterlist *sg;
1051 enum dma_data_direction dir;
1052
1053 BUG_ON(!ch);
1054 BUG_ON(!ioctx);
1055 BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
1056
1057 while (ioctx->n_rdma)
1058 kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
1059
1060 kfree(ioctx->rdma_ius);
1061 ioctx->rdma_ius = NULL;
1062
1063 if (ioctx->mapped_sg_count) {
1064 sg = ioctx->sg;
1065 WARN_ON(!sg);
1066 dir = ioctx->cmd.data_direction;
1067 BUG_ON(dir == DMA_NONE);
1068 ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
1069 opposite_dma_dir(dir));
1070 ioctx->mapped_sg_count = 0;
1071 }
1072}
1073
1074/**
1075 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
1076 */
1077static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
1078 struct srpt_send_ioctx *ioctx)
1079{
1080 struct se_cmd *cmd;
1081 struct scatterlist *sg, *sg_orig;
1082 int sg_cnt;
1083 enum dma_data_direction dir;
1084 struct rdma_iu *riu;
1085 struct srp_direct_buf *db;
1086 dma_addr_t dma_addr;
1087 struct ib_sge *sge;
1088 u64 raddr;
1089 u32 rsize;
1090 u32 tsize;
1091 u32 dma_len;
1092 int count, nrdma;
1093 int i, j, k;
1094
1095 BUG_ON(!ch);
1096 BUG_ON(!ioctx);
1097 cmd = &ioctx->cmd;
1098 dir = cmd->data_direction;
1099 BUG_ON(dir == DMA_NONE);
1100
1101 transport_do_task_sg_chain(cmd);
1102 ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
1103 ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
1104
1105 count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
1106 opposite_dma_dir(dir));
1107 if (unlikely(!count))
1108 return -EAGAIN;
1109
1110 ioctx->mapped_sg_count = count;
1111
1112 if (ioctx->rdma_ius && ioctx->n_rdma_ius)
1113 nrdma = ioctx->n_rdma_ius;
1114 else {
1115 nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
1116 + ioctx->n_rbuf;
1117
1118 ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
1119 if (!ioctx->rdma_ius)
1120 goto free_mem;
1121
1122 ioctx->n_rdma_ius = nrdma;
1123 }
1124
1125 db = ioctx->rbufs;
1126 tsize = cmd->data_length;
1127 dma_len = sg_dma_len(&sg[0]);
1128 riu = ioctx->rdma_ius;
1129
1130 /*
1131 * For each remote descriptor, calculate the number of ib_sge entries
1132 * needed. If at most SRPT_DEF_SG_PER_WQE SGEs are needed per RDMA
1133 * operation, one rdma_iu (one RDMA work request) per remote descriptor
1134 * suffices; otherwise extra rdma_iu entries are allocated to carry the
1135 * additional SGEs in additional RDMA work requests.
1137 */
1138 for (i = 0, j = 0;
1139 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1140 rsize = be32_to_cpu(db->len);
1141 raddr = be64_to_cpu(db->va);
1142 riu->raddr = raddr;
1143 riu->rkey = be32_to_cpu(db->key);
1144 riu->sge_cnt = 0;
1145
1146 /* calculate how many sge required for this remote_buf */
1147 while (rsize > 0 && tsize > 0) {
1148
1149 if (rsize >= dma_len) {
1150 tsize -= dma_len;
1151 rsize -= dma_len;
1152 raddr += dma_len;
1153
1154 if (tsize > 0) {
1155 ++j;
1156 if (j < count) {
1157 sg = sg_next(sg);
1158 dma_len = sg_dma_len(sg);
1159 }
1160 }
1161 } else {
1162 tsize -= rsize;
1163 dma_len -= rsize;
1164 rsize = 0;
1165 }
1166
1167 ++riu->sge_cnt;
1168
1169 if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
1170 ++ioctx->n_rdma;
1171 riu->sge =
1172 kmalloc(riu->sge_cnt * sizeof *riu->sge,
1173 GFP_KERNEL);
1174 if (!riu->sge)
1175 goto free_mem;
1176
1177 ++riu;
1178 riu->sge_cnt = 0;
1179 riu->raddr = raddr;
1180 riu->rkey = be32_to_cpu(db->key);
1181 }
1182 }
1183
1184 ++ioctx->n_rdma;
1185 riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
1186 GFP_KERNEL);
1187 if (!riu->sge)
1188 goto free_mem;
1189 }
1190
1191 db = ioctx->rbufs;
1192 tsize = cmd->data_length;
1193 riu = ioctx->rdma_ius;
1194 sg = sg_orig;
1195 dma_len = sg_dma_len(&sg[0]);
1196 dma_addr = sg_dma_address(&sg[0]);
1197
1198 /* The second loop maps the scatterlist addresses onto the rdma_iu->ib_sge entries. */
1199 for (i = 0, j = 0;
1200 j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
1201 rsize = be32_to_cpu(db->len);
1202 sge = riu->sge;
1203 k = 0;
1204
1205 while (rsize > 0 && tsize > 0) {
1206 sge->addr = dma_addr;
1207 sge->lkey = ch->sport->sdev->mr->lkey;
1208
1209 if (rsize >= dma_len) {
1210 sge->length =
1211 (tsize < dma_len) ? tsize : dma_len;
1212 tsize -= dma_len;
1213 rsize -= dma_len;
1214
1215 if (tsize > 0) {
1216 ++j;
1217 if (j < count) {
1218 sg = sg_next(sg);
1219 dma_len = sg_dma_len(sg);
1220 dma_addr = sg_dma_address(sg);
1221 }
1222 }
1223 } else {
1224 sge->length = (tsize < rsize) ? tsize : rsize;
1225 tsize -= rsize;
1226 dma_len -= rsize;
1227 dma_addr += rsize;
1228 rsize = 0;
1229 }
1230
1231 ++k;
1232 if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
1233 ++riu;
1234 sge = riu->sge;
1235 k = 0;
1236 } else if (rsize > 0 && tsize > 0)
1237 ++sge;
1238 }
1239 }
1240
1241 return 0;
1242
1243free_mem:
1244 srpt_unmap_sg_to_ib_sge(ch, ioctx);
1245
1246 return -ENOMEM;
1247}
1248
1249/**
1250 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
1251 */
1252static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1253{
1254 struct srpt_send_ioctx *ioctx;
1255 unsigned long flags;
1256
1257 BUG_ON(!ch);
1258
1259 ioctx = NULL;
1260 spin_lock_irqsave(&ch->spinlock, flags);
1261 if (!list_empty(&ch->free_list)) {
1262 ioctx = list_first_entry(&ch->free_list,
1263 struct srpt_send_ioctx, free_list);
1264 list_del(&ioctx->free_list);
1265 }
1266 spin_unlock_irqrestore(&ch->spinlock, flags);
1267
1268 if (!ioctx)
1269 return ioctx;
1270
1271 BUG_ON(ioctx->ch != ch);
1272 kref_init(&ioctx->kref);
1273 spin_lock_init(&ioctx->spinlock);
1274 ioctx->state = SRPT_STATE_NEW;
1275 ioctx->n_rbuf = 0;
1276 ioctx->rbufs = NULL;
1277 ioctx->n_rdma = 0;
1278 ioctx->n_rdma_ius = 0;
1279 ioctx->rdma_ius = NULL;
1280 ioctx->mapped_sg_count = 0;
1281 init_completion(&ioctx->tx_done);
1282 ioctx->queue_status_only = false;
1283 /*
1284 * transport_init_se_cmd() does not initialize all fields, so do it
1285 * here.
1286 */
1287 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1288 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1289
1290 return ioctx;
1291}
1292
1293/**
1294 * srpt_put_send_ioctx() - Free up resources.
1295 */
1296static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
1297{
1298 struct srpt_rdma_ch *ch;
1299 unsigned long flags;
1300
1301 BUG_ON(!ioctx);
1302 ch = ioctx->ch;
1303 BUG_ON(!ch);
1304
1305 WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
1306
1307 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1308 transport_generic_free_cmd(&ioctx->cmd, 0);
1309
1310 if (ioctx->n_rbuf > 1) {
1311 kfree(ioctx->rbufs);
1312 ioctx->rbufs = NULL;
1313 ioctx->n_rbuf = 0;
1314 }
1315
1316 spin_lock_irqsave(&ch->spinlock, flags);
1317 list_add(&ioctx->free_list, &ch->free_list);
1318 spin_unlock_irqrestore(&ch->spinlock, flags);
1319}
1320
1321static void srpt_put_send_ioctx_kref(struct kref *kref)
1322{
1323 srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
1324}
1325
1326/**
1327 * srpt_abort_cmd() - Abort a SCSI command.
1328 * @ioctx: I/O context associated with the SCSI command.
1329 * Returns the state of the command before this function started to abort it.
1330 */
1331static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1332{
1333 enum srpt_command_state state;
1334 unsigned long flags;
1335
1336 BUG_ON(!ioctx);
1337
1338 /*
1339 * If the command is in a state where the target core is waiting for
1340 * the ib_srpt driver, change the state to the next state. Changing
1341 * the state of the command from SRPT_STATE_NEED_DATA to
1342 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
1343 * function a second time.
1344 */
1345
1346 spin_lock_irqsave(&ioctx->spinlock, flags);
1347 state = ioctx->state;
1348 switch (state) {
1349 case SRPT_STATE_NEED_DATA:
1350 ioctx->state = SRPT_STATE_DATA_IN;
1351 break;
1352 case SRPT_STATE_DATA_IN:
1353 case SRPT_STATE_CMD_RSP_SENT:
1354 case SRPT_STATE_MGMT_RSP_SENT:
1355 ioctx->state = SRPT_STATE_DONE;
1356 break;
1357 default:
1358 break;
1359 }
1360 spin_unlock_irqrestore(&ioctx->spinlock, flags);
1361
1362 if (state == SRPT_STATE_DONE)
1363 goto out;
1364
1365 pr_debug("Aborting cmd with state %d and tag %lld\n", state,
1366 ioctx->tag);
1367
1368 switch (state) {
1369 case SRPT_STATE_NEW:
1370 case SRPT_STATE_DATA_IN:
1371 case SRPT_STATE_MGMT:
1372 /*
1373 * Do nothing - defer abort processing until
1374 * srpt_queue_response() is invoked.
1375 */
1376 WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
1377 break;
1378 case SRPT_STATE_NEED_DATA:
1379 /* DMA_TO_DEVICE (write) - RDMA read error. */
1380 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1381 transport_generic_handle_data(&ioctx->cmd);
1382 break;
1383 case SRPT_STATE_CMD_RSP_SENT:
1384 /*
1385 * SRP_RSP sending failed or the SRP_RSP send completion has
1386 * not been received in time.
1387 */
1388 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1389 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1390 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1391 break;
1392 case SRPT_STATE_MGMT_RSP_SENT:
1393 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1394 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1395 break;
1396 default:
1397 WARN_ON("ERROR: unexpected command state");
1398 break;
1399 }
1400
1401out:
1402 return state;
1403}
1404
1405/**
1406 * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
1407 */
1408static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
1409{
1410 struct srpt_send_ioctx *ioctx;
1411 enum srpt_command_state state;
1412 struct se_cmd *cmd;
1413 u32 index;
1414
1415 atomic_inc(&ch->sq_wr_avail);
1416
1417 index = idx_from_wr_id(wr_id);
1418 ioctx = ch->ioctx_ring[index];
1419 state = srpt_get_cmd_state(ioctx);
1420 cmd = &ioctx->cmd;
1421
1422 WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1423 && state != SRPT_STATE_MGMT_RSP_SENT
1424 && state != SRPT_STATE_NEED_DATA
1425 && state != SRPT_STATE_DONE);
1426
1427 /* If SRP_RSP sending failed, undo the ch->req_lim change. */
1428 if (state == SRPT_STATE_CMD_RSP_SENT
1429 || state == SRPT_STATE_MGMT_RSP_SENT)
1430 atomic_dec(&ch->req_lim);
1431
1432 srpt_abort_cmd(ioctx);
1433}
1434
1435/**
1436 * srpt_handle_send_comp() - Process an IB send completion notification.
1437 */
1438static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
1439 struct srpt_send_ioctx *ioctx)
1440{
1441 enum srpt_command_state state;
1442
1443 atomic_inc(&ch->sq_wr_avail);
1444
1445 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1446
1447 if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
1448 && state != SRPT_STATE_MGMT_RSP_SENT
1449 && state != SRPT_STATE_DONE))
1450 pr_debug("state = %d\n", state);
1451
1452 if (state != SRPT_STATE_DONE)
1453 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1454 else
1455 printk(KERN_ERR "IB completion has been received too late for"
1456 " wr_id = %u.\n", ioctx->ioctx.index);
1457}
1458
1459/**
1460 * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
1461 *
1462 * Note: transport_generic_handle_data() is asynchronous so unmapping the
1463 * data that has been transferred via IB RDMA must be postponed until the
1464 * check_stop_free() callback.
1465 */
1466static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
1467 struct srpt_send_ioctx *ioctx,
1468 enum srpt_opcode opcode)
1469{
1470 WARN_ON(ioctx->n_rdma <= 0);
1471 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1472
1473 if (opcode == SRPT_RDMA_READ_LAST) {
1474 if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1475 SRPT_STATE_DATA_IN))
1476 transport_generic_handle_data(&ioctx->cmd);
1477 else
1478 printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
1479 __LINE__, srpt_get_cmd_state(ioctx));
1480 } else if (opcode == SRPT_RDMA_ABORT) {
1481 ioctx->rdma_aborted = true;
1482 } else {
1483 WARN(true, "unexpected opcode %d\n", opcode);
1484 }
1485}
1486
1487/**
1488 * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
1489 */
1490static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
1491 struct srpt_send_ioctx *ioctx,
1492 enum srpt_opcode opcode)
1493{
1494 struct se_cmd *cmd;
1495 enum srpt_command_state state;
1496
1497 cmd = &ioctx->cmd;
1498 state = srpt_get_cmd_state(ioctx);
1499 switch (opcode) {
1500 case SRPT_RDMA_READ_LAST:
1501 if (ioctx->n_rdma <= 0) {
1502 printk(KERN_ERR "Received invalid RDMA read"
1503 " error completion with idx %d\n",
1504 ioctx->ioctx.index);
1505 break;
1506 }
1507 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1508 if (state == SRPT_STATE_NEED_DATA)
1509 srpt_abort_cmd(ioctx);
1510 else
1511 printk(KERN_ERR "%s[%d]: wrong state = %d\n",
1512 __func__, __LINE__, state);
1513 break;
1514 case SRPT_RDMA_WRITE_LAST:
1515 atomic_set(&ioctx->cmd.transport_lun_stop, 1);
1516 break;
1517 default:
1518 printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
1519 __LINE__, opcode);
1520 break;
1521 }
1522}
1523
1524/**
1525 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
1526 * @ch: RDMA channel through which the request has been received.
1527 * @ioctx: I/O context associated with the SRP_CMD request. The response will
1528 * be built in the buffer ioctx->buf points at and hence this function will
1529 * overwrite the request data.
1530 * @tag: tag of the request for which this response is being generated.
1531 * @status: value for the STATUS field of the SRP_RSP information unit.
1532 *
1533 * Returns the size in bytes of the SRP_RSP response.
1534 *
1535 * An SRP_RSP response contains a SCSI status or service response. See also
1536 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1537 * response. See also SPC-2 for more information about sense data.
1538 */
1539static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1540 struct srpt_send_ioctx *ioctx, u64 tag,
1541 int status)
1542{
1543 struct srp_rsp *srp_rsp;
1544 const u8 *sense_data;
1545 int sense_data_len, max_sense_len;
1546
1547 /*
1548 * The lowest bit of all SAM-3 status codes is zero (see also
1549 * paragraph 5.3 in SAM-3).
1550 */
1551 WARN_ON(status & 1);
1552
1553 srp_rsp = ioctx->ioctx.buf;
1554 BUG_ON(!srp_rsp);
1555
1556 sense_data = ioctx->sense_data;
1557 sense_data_len = ioctx->cmd.scsi_sense_length;
1558 WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1559
1560 memset(srp_rsp, 0, sizeof *srp_rsp);
1561 srp_rsp->opcode = SRP_RSP;
1562 srp_rsp->req_lim_delta =
1563 __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1564 srp_rsp->tag = tag;
1565 srp_rsp->status = status;
1566
1567 if (sense_data_len) {
1568 BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1569 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1570 if (sense_data_len > max_sense_len) {
1571 printk(KERN_WARNING "truncated sense data from %d to %d"
1572 " bytes\n", sense_data_len, max_sense_len);
1573 sense_data_len = max_sense_len;
1574 }
1575
1576 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1577 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1578 memcpy(srp_rsp + 1, sense_data, sense_data_len);
1579 }
1580
1581 return sizeof(*srp_rsp) + sense_data_len;
1582}
1583
1584/**
1585 * srpt_build_tskmgmt_rsp() - Build a task management response.
1586 * @ch: RDMA channel through which the request has been received.
1587 * @ioctx: I/O context in which the SRP_RSP response will be built.
1588 * @rsp_code: RSP_CODE that will be stored in the response.
1589 * @tag: Tag of the request for which this response is being generated.
1590 *
1591 * Returns the size in bytes of the SRP_RSP response.
1592 *
1593 * An SRP_RSP response contains a SCSI status or service response. See also
1594 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
1595 * response.
1596 */
1597static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1598 struct srpt_send_ioctx *ioctx,
1599 u8 rsp_code, u64 tag)
1600{
1601 struct srp_rsp *srp_rsp;
1602 int resp_data_len;
1603 int resp_len;
1604
1605 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
1606 resp_len = sizeof(*srp_rsp) + resp_data_len;
1607
1608 srp_rsp = ioctx->ioctx.buf;
1609 BUG_ON(!srp_rsp);
1610 memset(srp_rsp, 0, sizeof *srp_rsp);
1611
1612 srp_rsp->opcode = SRP_RSP;
1613 srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
1614 + atomic_xchg(&ch->req_lim_delta, 0));
1615 srp_rsp->tag = tag;
1616
1617 if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
1618 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1619 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1620 srp_rsp->data[3] = rsp_code;
1621 }
1622
1623 return resp_len;
1624}
1625
1626#define NO_SUCH_LUN ((uint64_t)-1LL)
1627
1628/*
1629 * SCSI LUN addressing method. See also SAM-2 and the section about
1630 * eight byte LUNs.
1631 */
1632enum scsi_lun_addr_method {
1633 SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
1634 SCSI_LUN_ADDR_METHOD_FLAT = 1,
1635 SCSI_LUN_ADDR_METHOD_LUN = 2,
1636 SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
1637};
1638
1639/*
1640 * srpt_unpack_lun() - Convert from network LUN to linear LUN.
1641 *
1642 * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
1643 * order (big endian) to a linear LUN. Supports three LUN addressing methods:
1644 * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
1645 */
1646static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
1647{
1648 uint64_t res = NO_SUCH_LUN;
1649 int addressing_method;
1650
1651 if (unlikely(len < 2)) {
1652 printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
1653 "more", len);
1654 goto out;
1655 }
1656
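/*
 * Verify that the bytes beyond the addressed level of the LUN structure are
 * zero; multi-level LUNs are not supported.
 */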
1657 switch (len) {
1658 case 8:
1659 if ((*((__be64 *)lun) &
1660 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1661 goto out_err;
1662 break;
1663 case 4:
1664 if (*((__be16 *)&lun[2]) != 0)
1665 goto out_err;
1666 break;
1667 case 6:
1668 if (*((__be32 *)&lun[2]) != 0)
1669 goto out_err;
1670 break;
1671 case 2:
1672 break;
1673 default:
1674 goto out_err;
1675 }
1676
1677 addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
1678 switch (addressing_method) {
1679 case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
1680 case SCSI_LUN_ADDR_METHOD_FLAT:
1681 case SCSI_LUN_ADDR_METHOD_LUN:
1682 res = *(lun + 1) | (((*lun) & 0x3f) << 8);
1683 break;
1684
1685 case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
1686 default:
1687 printk(KERN_ERR "Unimplemented LUN addressing method %u",
1688 addressing_method);
1689 break;
1690 }
1691
1692out:
1693 return res;
1694
1695out_err:
1696 printk(KERN_ERR "Support for multi-level LUNs has not yet been"
1697 " implemented");
1698 goto out;
1699}
1700
1701static int srpt_check_stop_free(struct se_cmd *cmd)
1702{
1703 struct srpt_send_ioctx *ioctx;
1704
1705 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
1706 return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
1707}
1708
1709/**
1710 * srpt_handle_cmd() - Process SRP_CMD.
1711 */
1712static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1713 struct srpt_recv_ioctx *recv_ioctx,
1714 struct srpt_send_ioctx *send_ioctx)
1715{
1716 struct se_cmd *cmd;
1717 struct srp_cmd *srp_cmd;
1718 uint64_t unpacked_lun;
1719 u64 data_len;
1720 enum dma_data_direction dir;
1721 int ret;
1722
1723 BUG_ON(!send_ioctx);
1724
1725 srp_cmd = recv_ioctx->ioctx.buf;
1726 kref_get(&send_ioctx->kref);
1727 cmd = &send_ioctx->cmd;
1728 send_ioctx->tag = srp_cmd->tag;
1729
1730 switch (srp_cmd->task_attr) {
1731 case SRP_CMD_SIMPLE_Q:
1732 cmd->sam_task_attr = MSG_SIMPLE_TAG;
1733 break;
1734 case SRP_CMD_ORDERED_Q:
1735 default:
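		/* Unrecognized task attributes are treated as ORDERED. */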
1736 cmd->sam_task_attr = MSG_ORDERED_TAG;
1737 break;
1738 case SRP_CMD_HEAD_OF_Q:
1739 cmd->sam_task_attr = MSG_HEAD_TAG;
1740 break;
1741 case SRP_CMD_ACA:
1742 cmd->sam_task_attr = MSG_ACA_TAG;
1743 break;
1744 }
1745
1746 ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
1747 if (ret) {
1748 printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
1749 srp_cmd->tag);
1750 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1751 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1752 goto send_sense;
1753 }
1754
1755 cmd->data_length = data_len;
1756 cmd->data_direction = dir;
1757 unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
1758 sizeof(srp_cmd->lun));
1759 if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
1760 goto send_sense;
1761 ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
1762 if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
1763 srpt_queue_status(cmd);
1764 else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
1765 goto send_sense;
1766 else
1767 WARN_ON_ONCE(ret);
1768
1769 transport_handle_cdb_direct(cmd);
1770 return 0;
1771
1772send_sense:
1773 transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
1774 0);
1775 return -1;
1776}
1777
1778/**
1779 * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
1780 * @ioctx: I/O context of the SRP task management request.
1781 * @tag: Tag of the SCSI command to be aborted.
1784 *
1785 * Returns zero if the target core will process the task management
1786 * request asynchronously.
1787 *
1788 * Note: It is assumed that the initiator serializes tag-based task management
1789 * requests.
1790 */
1791static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
1792{
1793 struct srpt_device *sdev;
1794 struct srpt_rdma_ch *ch;
1795 struct srpt_send_ioctx *target;
1796 int ret, i;
1797
1798 ret = -EINVAL;
1799 ch = ioctx->ch;
1800 BUG_ON(!ch);
1801 BUG_ON(!ch->sport);
1802 sdev = ch->sport->sdev;
1803 BUG_ON(!sdev);
1804 spin_lock_irq(&sdev->spinlock);
1805 for (i = 0; i < ch->rq_size; ++i) {
1806 target = ch->ioctx_ring[i];
1807 if (target->cmd.se_lun == ioctx->cmd.se_lun &&
1808 target->tag == tag &&
1809 srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
1810 ret = 0;
1811 /* now let the target core abort &target->cmd; */
1812 break;
1813 }
1814 }
1815 spin_unlock_irq(&sdev->spinlock);
1816 return ret;
1817}
1818
1819static int srp_tmr_to_tcm(int fn)
1820{
1821 switch (fn) {
1822 case SRP_TSK_ABORT_TASK:
1823 return TMR_ABORT_TASK;
1824 case SRP_TSK_ABORT_TASK_SET:
1825 return TMR_ABORT_TASK_SET;
1826 case SRP_TSK_CLEAR_TASK_SET:
1827 return TMR_CLEAR_TASK_SET;
1828 case SRP_TSK_LUN_RESET:
1829 return TMR_LUN_RESET;
1830 case SRP_TSK_CLEAR_ACA:
1831 return TMR_CLEAR_ACA;
1832 default:
1833 return -1;
1834 }
1835}
1836
1837/**
1838 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
1839 *
1840 * Hands the task management request off to the target core, or sends an error response back to the initiator if the request cannot be processed.
1841 *
1842 * For more information about SRP_TSK_MGMT information units, see also section
1843 * 6.7 in the SRP r16a document.
1844 */
1845static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1846 struct srpt_recv_ioctx *recv_ioctx,
1847 struct srpt_send_ioctx *send_ioctx)
1848{
1849 struct srp_tsk_mgmt *srp_tsk;
1850 struct se_cmd *cmd;
1851 uint64_t unpacked_lun;
1852 int tcm_tmr;
1853 int res;
1854
1855 BUG_ON(!send_ioctx);
1856
1857 srp_tsk = recv_ioctx->ioctx.buf;
1858 cmd = &send_ioctx->cmd;
1859
1860 pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
1861 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
1862 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
1863
1864 srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1865 send_ioctx->tag = srp_tsk->tag;
1866 tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
1867 if (tcm_tmr < 0) {
1868 send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1869 send_ioctx->cmd.se_tmr_req->response =
1870 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
1871 goto process_tmr;
1872 }
1873 cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
1874 if (!cmd->se_tmr_req) {
1875 send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1876 send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
1877 goto process_tmr;
1878 }
1879
1880 unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
1881 sizeof(srp_tsk->lun));
1882 res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
1883 if (res) {
1884 pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
1885 send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1886 send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1887 goto process_tmr;
1888 }
1889
1890 if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
1891 srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
1892
1893process_tmr:
1894 kref_get(&send_ioctx->kref);
1895 if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
1896 transport_generic_handle_tmr(&send_ioctx->cmd);
1897 else
1898 transport_send_check_condition_and_sense(cmd,
1899 cmd->scsi_sense_reason, 0);
1900
1901}
1902
1903/**
1904 * srpt_handle_new_iu() - Process a newly received information unit.
1905 * @ch: RDMA channel through which the information unit has been received.
1906 * @recv_ioctx: Receive I/O context associated with the information unit.
 * @send_ioctx: Send I/O context to use, or NULL to allocate one when needed.
1907 */
1908static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
1909 struct srpt_recv_ioctx *recv_ioctx,
1910 struct srpt_send_ioctx *send_ioctx)
1911{
1912 struct srp_cmd *srp_cmd;
1913 enum rdma_ch_state ch_state;
1914
1915 BUG_ON(!ch);
1916 BUG_ON(!recv_ioctx);
1917
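	/* Make the newly received information unit visible to the CPU. */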
1918 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1919 recv_ioctx->ioctx.dma, srp_max_req_size,
1920 DMA_FROM_DEVICE);
1921
1922 ch_state = srpt_get_ch_state(ch);
1923 if (unlikely(ch_state == CH_CONNECTING)) {
1924 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1925 goto out;
1926 }
1927
1928 if (unlikely(ch_state != CH_LIVE))
1929 goto out;
1930
1931 srp_cmd = recv_ioctx->ioctx.buf;
1932 if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
1933 if (!send_ioctx)
1934 send_ioctx = srpt_get_send_ioctx(ch);
1935 if (unlikely(!send_ioctx)) {
1936 list_add_tail(&recv_ioctx->wait_list,
1937 &ch->cmd_wait_list);
1938 goto out;
1939 }
1940 }
1941
1942 transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
1943 0, DMA_NONE, MSG_SIMPLE_TAG,
1944 send_ioctx->sense_data);
1945
1946 switch (srp_cmd->opcode) {
1947 case SRP_CMD:
1948 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1949 break;
1950 case SRP_TSK_MGMT:
1951 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1952 break;
1953 case SRP_I_LOGOUT:
1954 printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
1955 break;
1956 case SRP_CRED_RSP:
1957 pr_debug("received SRP_CRED_RSP\n");
1958 break;
1959 case SRP_AER_RSP:
1960 pr_debug("received SRP_AER_RSP\n");
1961 break;
1962 case SRP_RSP:
1963 printk(KERN_ERR "Received SRP_RSP\n");
1964 break;
1965 default:
1966 printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
1967 srp_cmd->opcode);
1968 break;
1969 }
1970
1971 srpt_post_recv(ch->sport->sdev, recv_ioctx);
1972out:
1973 return;
1974}
1975
1976static void srpt_process_rcv_completion(struct ib_cq *cq,
1977 struct srpt_rdma_ch *ch,
1978 struct ib_wc *wc)
1979{
1980 struct srpt_device *sdev = ch->sport->sdev;
1981 struct srpt_recv_ioctx *ioctx;
1982 u32 index;
1983
1984 index = idx_from_wr_id(wc->wr_id);
1985 if (wc->status == IB_WC_SUCCESS) {
1986 int req_lim;
1987
1988 req_lim = atomic_dec_return(&ch->req_lim);
1989 if (unlikely(req_lim < 0))
1990 printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
1991 ioctx = sdev->ioctx_ring[index];
1992 srpt_handle_new_iu(ch, ioctx, NULL);
1993 } else {
1994 printk(KERN_INFO "receiving failed for idx %u with status %d\n",
1995 index, wc->status);
1996 }
1997}
1998
1999/**
2000 * srpt_process_send_completion() - Process an IB send completion.
2001 *
2002 * Note: Although this has not yet been observed during tests, at least in
2003 * theory it is possible that the srpt_get_send_ioctx() call invoked by
2004 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
2005 * value in each response is set to one, and it is possible that this response
2006 * makes the initiator send a new request before the send completion for that
2007 * response has been processed. This could e.g. happen if the call to
2008 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
2009 * if IB retransmission causes generation of the send completion to be
2010 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
2011 * are queued on cmd_wait_list. The code below processes these delayed
2012 * requests one at a time.
2013 */
2014static void srpt_process_send_completion(struct ib_cq *cq,
2015 struct srpt_rdma_ch *ch,
2016 struct ib_wc *wc)
2017{
2018 struct srpt_send_ioctx *send_ioctx;
2019 uint32_t index;
2020 enum srpt_opcode opcode;
2021
2022 index = idx_from_wr_id(wc->wr_id);
2023 opcode = opcode_from_wr_id(wc->wr_id);
2024 send_ioctx = ch->ioctx_ring[index];
2025 if (wc->status == IB_WC_SUCCESS) {
2026 if (opcode == SRPT_SEND)
2027 srpt_handle_send_comp(ch, send_ioctx);
2028 else {
2029 WARN_ON(opcode != SRPT_RDMA_ABORT &&
2030 wc->opcode != IB_WC_RDMA_READ);
2031 srpt_handle_rdma_comp(ch, send_ioctx, opcode);
2032 }
2033 } else {
2034 if (opcode == SRPT_SEND) {
2035 printk(KERN_INFO "sending response for idx %u failed"
2036 " with status %d\n", index, wc->status);
2037 srpt_handle_send_err_comp(ch, wc->wr_id);
2038 } else if (opcode != SRPT_RDMA_MID) {
2039 printk(KERN_INFO "RDMA operation %d for idx %u failed"
2040 " with status %d\n", opcode, index, wc->status);
2041 srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
2042 }
2043 }
2044
2045 while (unlikely(opcode == SRPT_SEND
2046 && !list_empty(&ch->cmd_wait_list)
2047 && srpt_get_ch_state(ch) == CH_LIVE
2048 && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
2049 struct srpt_recv_ioctx *recv_ioctx;
2050
2051 recv_ioctx = list_first_entry(&ch->cmd_wait_list,
2052 struct srpt_recv_ioctx,
2053 wait_list);
2054 list_del(&recv_ioctx->wait_list);
2055 srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
2056 }
2057}
2058
2059static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
2060{
2061 struct ib_wc *const wc = ch->wc;
2062 int i, n;
2063
2064 WARN_ON(cq != ch->cq);
2065
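/*
 * Rearm the completion queue before draining it so that a completion that
 * arrives while polling still triggers a new notification.
 */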
2066 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2067 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
2068 for (i = 0; i < n; i++) {
2069 if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
2070 srpt_process_rcv_completion(cq, ch, &wc[i]);
2071 else
2072 srpt_process_send_completion(cq, ch, &wc[i]);
2073 }
2074 }
2075}
2076
2077/**
2078 * srpt_completion() - IB completion queue callback function.
2079 *
2080 * Notes:
2081 * - It is guaranteed that a completion handler will never be invoked
2082 * concurrently on two different CPUs for the same completion queue. See also
2083 * Documentation/infiniband/core_locking.txt and the implementation of
2084 * handle_edge_irq() in kernel/irq/chip.c.
2085 * - When threaded IRQs are enabled, completion handlers are invoked in thread
2086 * context instead of interrupt context.
2087 */
2088static void srpt_completion(struct ib_cq *cq, void *ctx)
2089{
2090 struct srpt_rdma_ch *ch = ctx;
2091
2092 wake_up_interruptible(&ch->wait_queue);
2093}
2094
2095static int srpt_compl_thread(void *arg)
2096{
2097 struct srpt_rdma_ch *ch;
2098
2099 /* Hibernation / freezing of the SRPT kernel thread is not supported. */
2100 current->flags |= PF_NOFREEZE;
2101
2102 ch = arg;
2103 BUG_ON(!ch);
2104 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
2105 ch->sess_name, ch->thread->comm, current->pid);
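/*
 * Completions are processed as a side effect of evaluating the wait
 * condition below; the loop exits once kthread_should_stop() returns true.
 */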
2106 while (!kthread_should_stop()) {
2107 wait_event_interruptible(ch->wait_queue,
2108 (srpt_process_completion(ch->cq, ch),
2109 kthread_should_stop()));
2110 }
2111 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
2112 ch->sess_name, ch->thread->comm, current->pid);
2113 return 0;
2114}
2115
2116/**
2117 * srpt_create_ch_ib() - Create the completion queue, queue pair and completion handler thread of a channel.
2118 */
2119static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2120{
2121 struct ib_qp_init_attr *qp_init;
2122 struct srpt_port *sport = ch->sport;
2123 struct srpt_device *sdev = sport->sdev;
2124 u32 srp_sq_size = sport->port_attrib.srp_sq_size;
2125 int ret;
2126
2127 WARN_ON(ch->rq_size < 1);
2128
2129 ret = -ENOMEM;
2130 qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
2131 if (!qp_init)
2132 goto out;
2133
2134 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2135 ch->rq_size + srp_sq_size, 0);
2136 if (IS_ERR(ch->cq)) {
2137 ret = PTR_ERR(ch->cq);
2138 printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
2139 ch->rq_size + srp_sq_size, ret);
2140 goto out;
2141 }
2142
2143 qp_init->qp_context = (void *)ch;
2144 qp_init->event_handler
2145 = (void(*)(struct ib_event *, void*))srpt_qp_event;
2146 qp_init->send_cq = ch->cq;
2147 qp_init->recv_cq = ch->cq;
2148 qp_init->srq = sdev->srq;
2149 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
2150 qp_init->qp_type = IB_QPT_RC;
2151 qp_init->cap.max_send_wr = srp_sq_size;
2152 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
2153
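/*
 * Receive work requests are posted to the device-wide SRQ, so only the send
 * queue capabilities of the new queue pair have to be configured here.
 */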
2154 ch->qp = ib_create_qp(sdev->pd, qp_init);
2155 if (IS_ERR(ch->qp)) {
2156 ret = PTR_ERR(ch->qp);
2157 printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
2158 goto err_destroy_cq;
2159 }
2160
2161 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
2162
2163 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
2164 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
2165 qp_init->cap.max_send_wr, ch->cm_id);
2166
2167 ret = srpt_init_ch_qp(ch, ch->qp);
2168 if (ret)
2169 goto err_destroy_qp;
2170
2171 init_waitqueue_head(&ch->wait_queue);
2172
2173 pr_debug("creating thread for session %s\n", ch->sess_name);
2174
2175 ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
2176 if (IS_ERR(ch->thread)) {
2177 printk(KERN_ERR "failed to create kernel thread %ld\n",
2178 PTR_ERR(ch->thread));
2179 ch->thread = NULL;
2180 goto err_destroy_qp;
2181 }
2182
2183out:
2184 kfree(qp_init);
2185 return ret;
2186
2187err_destroy_qp:
2188 ib_destroy_qp(ch->qp);
2189err_destroy_cq:
2190 ib_destroy_cq(ch->cq);
2191 goto out;
2192}
2193
2194static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
2195{
2196 if (ch->thread)
2197 kthread_stop(ch->thread);
2198
2199 ib_destroy_qp(ch->qp);
2200 ib_destroy_cq(ch->cq);
2201}
2202
2203/**
2204 * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
2205 *
2206 * Reset the QP and make sure all resources associated with the channel will
2207 * be deallocated at an appropriate time.
2208 *
2209 * Note: The caller must hold ch->sport->sdev->spinlock.
2210 */
2211static void __srpt_close_ch(struct srpt_rdma_ch *ch)
2212{
2213 struct srpt_device *sdev;
2214 enum rdma_ch_state prev_state;
2215 unsigned long flags;
2216
2217 sdev = ch->sport->sdev;
2218
2219 spin_lock_irqsave(&ch->spinlock, flags);
2220 prev_state = ch->state;
2221 switch (prev_state) {
2222 case CH_CONNECTING:
2223 case CH_LIVE:
2224 ch->state = CH_DISCONNECTING;
2225 break;
2226 default:
2227 break;
2228 }
2229 spin_unlock_irqrestore(&ch->spinlock, flags);
2230
2231 switch (prev_state) {
2232 case CH_CONNECTING:
2233 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
2234 NULL, 0);
2235 /* fall through */
2236 case CH_LIVE:
2237 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
2238 printk(KERN_ERR "sending CM DREQ failed.\n");
2239 break;
2240 case CH_DISCONNECTING:
2241 break;
2242 case CH_DRAINING:
2243 case CH_RELEASING:
2244 break;
2245 }
2246}
2247
2248/**
2249 * srpt_close_ch() - Close an RDMA channel.
2250 */
2251static void srpt_close_ch(struct srpt_rdma_ch *ch)
2252{
2253 struct srpt_device *sdev;
2254
2255 sdev = ch->sport->sdev;
2256 spin_lock_irq(&sdev->spinlock);
2257 __srpt_close_ch(ch);
2258 spin_unlock_irq(&sdev->spinlock);
2259}
2260
2261/**
2262 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
2263 * @cm_id: Pointer to the CM ID of the channel to be drained.
2264 *
2265 * Note: Must be called from inside srpt_cm_handler to avoid a race between
2266 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
2267 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
2268 * waits until all target sessions for the associated IB device have been
2269 * unregistered and target session registration involves a call to
2270 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
2271 * this function has finished).
2272 */
2273static void srpt_drain_channel(struct ib_cm_id *cm_id)
2274{
2275 struct srpt_device *sdev;
2276 struct srpt_rdma_ch *ch;
2277 int ret;
2278 bool do_reset = false;
2279
2280 WARN_ON_ONCE(irqs_disabled());
2281
2282 sdev = cm_id->context;
2283 BUG_ON(!sdev);
2284 spin_lock_irq(&sdev->spinlock);
2285 list_for_each_entry(ch, &sdev->rch_list, list) {
2286 if (ch->cm_id == cm_id) {
2287 do_reset = srpt_test_and_set_ch_state(ch,
2288 CH_CONNECTING, CH_DRAINING) ||
2289 srpt_test_and_set_ch_state(ch,
2290 CH_LIVE, CH_DRAINING) ||
2291 srpt_test_and_set_ch_state(ch,
2292 CH_DISCONNECTING, CH_DRAINING);
2293 break;
2294 }
2295 }
2296 spin_unlock_irq(&sdev->spinlock);
2297
2298 if (do_reset) {
2299 ret = srpt_ch_qp_err(ch);
2300 if (ret < 0)
2301 printk(KERN_ERR "Setting queue pair in error state"
2302 " failed: %d\n", ret);
2303 }
2304}
2305
2306/**
2307 * srpt_find_channel() - Look up an RDMA channel.
2308 * @cm_id: Pointer to the CM ID of the channel to be looked up.
2309 *
2310 * Return NULL if no matching RDMA channel has been found.
2311 */
2312static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
2313 struct ib_cm_id *cm_id)
2314{
2315 struct srpt_rdma_ch *ch;
2316 bool found;
2317
2318 WARN_ON_ONCE(irqs_disabled());
2319 BUG_ON(!sdev);
2320
2321 found = false;
2322 spin_lock_irq(&sdev->spinlock);
2323 list_for_each_entry(ch, &sdev->rch_list, list) {
2324 if (ch->cm_id == cm_id) {
2325 found = true;
2326 break;
2327 }
2328 }
2329 spin_unlock_irq(&sdev->spinlock);
2330
2331 return found ? ch : NULL;
2332}
2333
2334/**
2335 * srpt_release_channel() - Release channel resources.
2336 *
2337 * Schedules the actual release because:
2338 * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
2339 * trigger a deadlock.
2340 * - It is not safe to call TCM transport_* functions from interrupt context.
2341 */
2342static void srpt_release_channel(struct srpt_rdma_ch *ch)
2343{
2344 schedule_work(&ch->release_work);
2345}
2346
2347static void srpt_release_channel_work(struct work_struct *w)
2348{
2349 struct srpt_rdma_ch *ch;
2350 struct srpt_device *sdev;
2351
2352 ch = container_of(w, struct srpt_rdma_ch, release_work);
2353 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
2354 ch->release_done);
2355
2356 sdev = ch->sport->sdev;
2357 BUG_ON(!sdev);
2358
2359 transport_deregister_session_configfs(ch->sess);
2360 transport_deregister_session(ch->sess);
2361 ch->sess = NULL;
2362
2363 srpt_destroy_ch_ib(ch);
2364
2365 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2366 ch->sport->sdev, ch->rq_size,
2367 ch->rsp_size, DMA_TO_DEVICE);
2368
2369 spin_lock_irq(&sdev->spinlock);
2370 list_del(&ch->list);
2371 spin_unlock_irq(&sdev->spinlock);
2372
2373 ib_destroy_cm_id(ch->cm_id);
2374
2375 if (ch->release_done)
2376 complete(ch->release_done);
2377
2378 wake_up(&sdev->ch_releaseQ);
2379
2380 kfree(ch);
2381}
2382
2383static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
2384 u8 i_port_id[16])
2385{
2386 struct srpt_node_acl *nacl;
2387
2388 list_for_each_entry(nacl, &sport->port_acl_list, list)
2389 if (memcmp(nacl->i_port_id, i_port_id,
2390 sizeof(nacl->i_port_id)) == 0)
2391 return nacl;
2392
2393 return NULL;
2394}
2395
2396static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
2397 u8 i_port_id[16])
2398{
2399 struct srpt_node_acl *nacl;
2400
2401 spin_lock_irq(&sport->port_acl_lock);
2402 nacl = __srpt_lookup_acl(sport, i_port_id);
2403 spin_unlock_irq(&sport->port_acl_lock);
2404
2405 return nacl;
2406}
2407
2408/**
2409 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
2410 *
2411 * Ownership of the cm_id is transferred to the target session if this
2412 * function returns zero. Otherwise the caller remains the owner of cm_id.
2413 */
2414static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2415 struct ib_cm_req_event_param *param,
2416 void *private_data)
2417{
2418 struct srpt_device *sdev = cm_id->context;
2419 struct srpt_port *sport = &sdev->port[param->port - 1];
2420 struct srp_login_req *req;
2421 struct srp_login_rsp *rsp;
2422 struct srp_login_rej *rej;
2423 struct ib_cm_rep_param *rep_param;
2424 struct srpt_rdma_ch *ch, *tmp_ch;
2425 struct srpt_node_acl *nacl;
2426 u32 it_iu_len;
2427 int i;
2428 int ret = 0;
2429
2430 WARN_ON_ONCE(irqs_disabled());
2431
2432 if (WARN_ON(!sdev || !private_data))
2433 return -EINVAL;
2434
2435 req = (struct srp_login_req *)private_data;
2436
2437 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2438
2439 printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2440 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2441 " (guid=0x%llx:0x%llx)\n",
2442 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2443 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2444 be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2445 be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2446 it_iu_len,
2447 param->port,
2448 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2449 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
2450
2451 rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
2452 rej = kzalloc(sizeof *rej, GFP_KERNEL);
2453 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
2454
2455 if (!rsp || !rej || !rep_param) {
2456 ret = -ENOMEM;
2457 goto out;
2458 }
2459
2460 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2461 rej->reason = __constant_cpu_to_be32(
2462 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2463 ret = -EINVAL;
2464 printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
2465 " length (%d bytes) is out of range (%d .. %d)\n",
2466 it_iu_len, 64, srp_max_req_size);
2467 goto reject;
2468 }
2469
2470 if (!sport->enabled) {
2471 rej->reason = __constant_cpu_to_be32(
2472 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2473 ret = -EINVAL;
2474 printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
2475 " has not yet been enabled\n");
2476 goto reject;
2477 }
2478
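/*
 * SRP_MULTICHAN_SINGLE requests that existing channels for the same
 * initiator and target port pair be terminated before the new channel is
 * established.
 */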
2479 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2480 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2481
2482 spin_lock_irq(&sdev->spinlock);
2483
2484 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2485 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2486 && !memcmp(ch->t_port_id, req->target_port_id, 16)
2487 && param->port == ch->sport->port
2488 && param->listen_id == ch->sport->sdev->cm_id
2489 && ch->cm_id) {
2490 enum rdma_ch_state ch_state;
2491
2492 ch_state = srpt_get_ch_state(ch);
2493 if (ch_state != CH_CONNECTING
2494 && ch_state != CH_LIVE)
2495 continue;
2496
2497 /* found an existing channel */
2498 pr_debug("Found existing channel %s"
2499 " cm_id= %p state= %d\n",
2500 ch->sess_name, ch->cm_id, ch_state);
2501
2502 __srpt_close_ch(ch);
2503
2504 rsp->rsp_flags =
2505 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2506 }
2507 }
2508
2509 spin_unlock_irq(&sdev->spinlock);
2510
2511 } else
2512 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2513
2514 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2515 || *(__be64 *)(req->target_port_id + 8) !=
2516 cpu_to_be64(srpt_service_guid)) {
2517 rej->reason = __constant_cpu_to_be32(
2518 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2519 ret = -ENOMEM;
2520 printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
2521 " has an invalid target port identifier.\n");
2522 goto reject;
2523 }
2524
2525 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2526 if (!ch) {
2527 rej->reason = __constant_cpu_to_be32(
2528 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2529 printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
2530 ret = -ENOMEM;
2531 goto reject;
2532 }
2533
2534 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2535 memcpy(ch->i_port_id, req->initiator_port_id, 16);
2536 memcpy(ch->t_port_id, req->target_port_id, 16);
2537 ch->sport = &sdev->port[param->port - 1];
2538 ch->cm_id = cm_id;
2539 /*
2540 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2541 * for the SRP protocol to the command queue size.
2542 */
2543 ch->rq_size = SRPT_RQ_SIZE;
2544 spin_lock_init(&ch->spinlock);
2545 ch->state = CH_CONNECTING;
2546 INIT_LIST_HEAD(&ch->cmd_wait_list);
2547 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2548
2549 ch->ioctx_ring = (struct srpt_send_ioctx **)
2550 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2551 sizeof(*ch->ioctx_ring[0]),
2552 ch->rsp_size, DMA_TO_DEVICE);
2553 if (!ch->ioctx_ring)
2554 goto free_ch;
2555
2556 INIT_LIST_HEAD(&ch->free_list);
2557 for (i = 0; i < ch->rq_size; i++) {
2558 ch->ioctx_ring[i]->ch = ch;
2559 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2560 }
2561
2562 ret = srpt_create_ch_ib(ch);
2563 if (ret) {
2564 rej->reason = __constant_cpu_to_be32(
2565 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2566 printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
2567 " a new RDMA channel failed.\n");
2568 goto free_ring;
2569 }
2570
2571 ret = srpt_ch_qp_rtr(ch, ch->qp);
2572 if (ret) {
2573 rej->reason = __constant_cpu_to_be32(
2574 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2575 printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
2576 " RTR failed (error code = %d)\n", ret);
2577 goto destroy_ib;
2578 }
2579 /*
2580	 * Use the initiator port identifier as the session name.
2581 */
2582 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2583 be64_to_cpu(*(__be64 *)ch->i_port_id),
2584 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2585
2586 pr_debug("registering session %s\n", ch->sess_name);
2587
2588 nacl = srpt_lookup_acl(sport, ch->i_port_id);
2589 if (!nacl) {
2590 printk(KERN_INFO "Rejected login because no ACL has been"
2591 " configured yet for initiator %s.\n", ch->sess_name);
2592 rej->reason = __constant_cpu_to_be32(
2593 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2594 goto destroy_ib;
2595 }
2596
2597 ch->sess = transport_init_session();
2598 if (!ch->sess) {
2599 rej->reason = __constant_cpu_to_be32(
2600 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2601 pr_debug("Failed to create session\n");
2602 goto deregister_session;
2603 }
2604 ch->sess->se_node_acl = &nacl->nacl;
2605 transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
2606
2607 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2608 ch->sess_name, ch->cm_id);
2609
2610 /* create srp_login_response */
2611 rsp->opcode = SRP_LOGIN_RSP;
2612 rsp->tag = req->tag;
2613 rsp->max_it_iu_len = req->req_it_iu_len;
2614 rsp->max_ti_iu_len = req->req_it_iu_len;
2615 ch->max_ti_iu_len = it_iu_len;
2616 rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2617 | SRP_BUF_FORMAT_INDIRECT);
2618 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2619 atomic_set(&ch->req_lim, ch->rq_size);
2620 atomic_set(&ch->req_lim_delta, 0);
2621
2622 /* create cm reply */
2623 rep_param->qp_num = ch->qp->qp_num;
2624 rep_param->private_data = (void *)rsp;
2625 rep_param->private_data_len = sizeof *rsp;
2626 rep_param->rnr_retry_count = 7;
2627 rep_param->flow_control = 1;
2628 rep_param->failover_accepted = 0;
2629 rep_param->srq = 1;
2630 rep_param->responder_resources = 4;
2631 rep_param->initiator_depth = 4;
2632
2633 ret = ib_send_cm_rep(cm_id, rep_param);
2634 if (ret) {
2635 printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
2636 " (error code = %d)\n", ret);
2637 goto release_channel;
2638 }
2639
2640 spin_lock_irq(&sdev->spinlock);
2641 list_add_tail(&ch->list, &sdev->rch_list);
2642 spin_unlock_irq(&sdev->spinlock);
2643
2644 goto out;
2645
2646release_channel:
2647 srpt_set_ch_state(ch, CH_RELEASING);
2648 transport_deregister_session_configfs(ch->sess);
2649
2650deregister_session:
2651 transport_deregister_session(ch->sess);
2652 ch->sess = NULL;
2653
2654destroy_ib:
2655 srpt_destroy_ch_ib(ch);
2656
2657free_ring:
2658 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2659 ch->sport->sdev, ch->rq_size,
2660 ch->rsp_size, DMA_TO_DEVICE);
2661free_ch:
2662 kfree(ch);
2663
2664reject:
2665 rej->opcode = SRP_LOGIN_REJ;
2666 rej->tag = req->tag;
2667 rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2668 | SRP_BUF_FORMAT_INDIRECT);
2669
2670 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2671 (void *)rej, sizeof *rej);
2672
2673out:
2674 kfree(rep_param);
2675 kfree(rsp);
2676 kfree(rej);
2677
2678 return ret;
2679}
2680
2681static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
2682{
2683 printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
2684 srpt_drain_channel(cm_id);
2685}
2686
2687/**
2688 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2689 *
2690 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2691 * and that the recipient may begin transmitting (RTU = ready to use).
2692 */
2693static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
2694{
2695 struct srpt_rdma_ch *ch;
2696 int ret;
2697
2698 ch = srpt_find_channel(cm_id->context, cm_id);
2699 BUG_ON(!ch);
2700
2701 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
2702 struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
2703
2704 ret = srpt_ch_qp_rts(ch, ch->qp);
2705
2706 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2707 wait_list) {
2708 list_del(&ioctx->wait_list);
2709 srpt_handle_new_iu(ch, ioctx, NULL);
2710 }
2711 if (ret)
2712 srpt_close_ch(ch);
2713 }
2714}
2715
2716static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
2717{
2718 printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
2719 srpt_drain_channel(cm_id);
2720}
2721
2722static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
2723{
2724 printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
2725 srpt_drain_channel(cm_id);
2726}
2727
2728/**
2729 * srpt_cm_dreq_recv() - Process reception of a DREQ message.
2730 */
2731static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
2732{
2733 struct srpt_rdma_ch *ch;
2734 unsigned long flags;
2735 bool send_drep = false;
2736
2737 ch = srpt_find_channel(cm_id->context, cm_id);
2738 BUG_ON(!ch);
2739
2740 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
2741
2742 spin_lock_irqsave(&ch->spinlock, flags);
2743 switch (ch->state) {
2744 case CH_CONNECTING:
2745 case CH_LIVE:
2746 send_drep = true;
2747 ch->state = CH_DISCONNECTING;
2748 break;
2749 case CH_DISCONNECTING:
2750 case CH_DRAINING:
2751 case CH_RELEASING:
2752 WARN(true, "unexpected channel state %d\n", ch->state);
2753 break;
2754 }
2755 spin_unlock_irqrestore(&ch->spinlock, flags);
2756
2757 if (send_drep) {
2758 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
2759 printk(KERN_ERR "Sending IB DREP failed.\n");
2760 printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
2761 ch->sess_name);
2762 }
2763}
2764
2765/**
2766 * srpt_cm_drep_recv() - Process reception of a DREP message.
2767 */
2768static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
2769{
2770 printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
2771 cm_id);
2772 srpt_drain_channel(cm_id);
2773}
2774
2775/**
2776 * srpt_cm_handler() - IB connection manager callback function.
2777 *
2778 * A non-zero return value will cause the caller to destroy the CM ID.
2779 *
2780 * Note: srpt_cm_handler() must only return a non-zero value when transferring
2781 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
2782 * a non-zero value in any other case will trigger a race with the
2783 * ib_destroy_cm_id() call in srpt_release_channel().
2784 */
2785static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2786{
2787 int ret;
2788
2789 ret = 0;
2790 switch (event->event) {
2791 case IB_CM_REQ_RECEIVED:
2792 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2793 event->private_data);
2794 break;
2795 case IB_CM_REJ_RECEIVED:
2796 srpt_cm_rej_recv(cm_id);
2797 break;
2798 case IB_CM_RTU_RECEIVED:
2799 case IB_CM_USER_ESTABLISHED:
2800 srpt_cm_rtu_recv(cm_id);
2801 break;
2802 case IB_CM_DREQ_RECEIVED:
2803 srpt_cm_dreq_recv(cm_id);
2804 break;
2805 case IB_CM_DREP_RECEIVED:
2806 srpt_cm_drep_recv(cm_id);
2807 break;
2808 case IB_CM_TIMEWAIT_EXIT:
2809 srpt_cm_timewait_exit(cm_id);
2810 break;
2811 case IB_CM_REP_ERROR:
2812 srpt_cm_rep_error(cm_id);
2813 break;
2814 case IB_CM_DREQ_ERROR:
2815 printk(KERN_INFO "Received IB DREQ ERROR event.\n");
2816 break;
2817 case IB_CM_MRA_RECEIVED:
2818 printk(KERN_INFO "Received IB MRA event\n");
2819 break;
2820 default:
2821 printk(KERN_ERR "received unrecognized IB CM event %d\n",
2822 event->event);
2823 break;
2824 }
2825
2826 return ret;
2827}
2828
2829/**
2830 * srpt_perform_rdmas() - Perform IB RDMA.
2831 *
2832 * Returns zero upon success or a negative number upon failure.
2833 */
2834static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2835 struct srpt_send_ioctx *ioctx)
2836{
2837 struct ib_send_wr wr;
2838 struct ib_send_wr *bad_wr;
2839 struct rdma_iu *riu;
2840 int i;
2841 int ret;
2842 int sq_wr_avail;
2843 enum dma_data_direction dir;
2844 const int n_rdma = ioctx->n_rdma;
2845
2846 dir = ioctx->cmd.data_direction;
2847 if (dir == DMA_TO_DEVICE) {
2848 /* write */
2849 ret = -ENOMEM;
2850 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2851 if (sq_wr_avail < 0) {
2852 printk(KERN_WARNING "IB send queue full (needed %d)\n",
2853 n_rdma);
2854 goto out;
2855 }
2856 }
2857
2858 ioctx->rdma_aborted = false;
2859 ret = 0;
2860 riu = ioctx->rdma_ius;
2861 memset(&wr, 0, sizeof wr);
2862
2863 for (i = 0; i < n_rdma; ++i, ++riu) {
2864 if (dir == DMA_FROM_DEVICE) {
2865 wr.opcode = IB_WR_RDMA_WRITE;
2866 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2867 SRPT_RDMA_WRITE_LAST :
2868 SRPT_RDMA_MID,
2869 ioctx->ioctx.index);
2870 } else {
2871 wr.opcode = IB_WR_RDMA_READ;
2872 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2873 SRPT_RDMA_READ_LAST :
2874 SRPT_RDMA_MID,
2875 ioctx->ioctx.index);
2876 }
2877 wr.next = NULL;
2878 wr.wr.rdma.remote_addr = riu->raddr;
2879 wr.wr.rdma.rkey = riu->rkey;
2880 wr.num_sge = riu->sge_cnt;
2881 wr.sg_list = riu->sge;
2882
2883 /* only get completion event for the last rdma write */
2884 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
2885 wr.send_flags = IB_SEND_SIGNALED;
2886
2887 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2888 if (ret)
2889 break;
2890 }
2891
2892 if (ret)
2893 printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
2894 __func__, __LINE__, ret, i, n_rdma);
2895 if (ret && i > 0) {
2896 wr.num_sge = 0;
2897 wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
2898 wr.send_flags = IB_SEND_SIGNALED;
2899 while (ch->state == CH_LIVE &&
2900 ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
2901 printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
2902 ioctx->ioctx.index);
2903 msleep(1000);
2904 }
2905 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
2906 printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
2907 ioctx->ioctx.index);
2908 msleep(1000);
2909 }
2910 }
2911out:
2912 if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2913 atomic_add(n_rdma, &ch->sq_wr_avail);
2914 return ret;
2915}
2916
2917/**
2918 * srpt_xfer_data() - Start data transfer from initiator to target.
2919 */
2920static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2921 struct srpt_send_ioctx *ioctx)
2922{
2923 int ret;
2924
2925 ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2926 if (ret) {
2927 printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2928 goto out;
2929 }
2930
2931 ret = srpt_perform_rdmas(ch, ioctx);
2932 if (ret) {
2933 if (ret == -EAGAIN || ret == -ENOMEM)
2934 printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
2935 __func__, __LINE__, ret);
2936 else
2937 printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
2938 __func__, __LINE__, ret);
2939 goto out_unmap;
2940 }
2941
2942out:
2943 return ret;
2944out_unmap:
2945 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2946 goto out;
2947}
2948
2949static int srpt_write_pending_status(struct se_cmd *se_cmd)
2950{
2951 struct srpt_send_ioctx *ioctx;
2952
2953 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2954 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2955}
2956
2957/*
2958 * srpt_write_pending() - Start data transfer from initiator to target (write).
2959 */
2960static int srpt_write_pending(struct se_cmd *se_cmd)
2961{
2962 struct srpt_rdma_ch *ch;
2963 struct srpt_send_ioctx *ioctx;
2964 enum srpt_command_state new_state;
2965 enum rdma_ch_state ch_state;
2966 int ret;
2967
2968 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2969
2970 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2971 WARN_ON(new_state == SRPT_STATE_DONE);
2972
2973 ch = ioctx->ch;
2974 BUG_ON(!ch);
2975
2976 ch_state = srpt_get_ch_state(ch);
2977 switch (ch_state) {
2978 case CH_CONNECTING:
2979 WARN(true, "unexpected channel state %d\n", ch_state);
2980 ret = -EINVAL;
2981 goto out;
2982 case CH_LIVE:
2983 break;
2984 case CH_DISCONNECTING:
2985 case CH_DRAINING:
2986 case CH_RELEASING:
2987 pr_debug("cmd with tag %lld: channel disconnecting\n",
2988 ioctx->tag);
2989 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2990 ret = -EINVAL;
2991 goto out;
2992 }
2993 ret = srpt_xfer_data(ch, ioctx);
2994
2995out:
2996 return ret;
2997}
2998
2999static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
3000{
3001 switch (tcm_mgmt_status) {
3002 case TMR_FUNCTION_COMPLETE:
3003 return SRP_TSK_MGMT_SUCCESS;
3004 case TMR_FUNCTION_REJECTED:
3005 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
3006 }
3007 return SRP_TSK_MGMT_FAILED;
3008}
3009
3010/**
3011 * srpt_queue_response() - Transmits the response to a SCSI command.
3012 *
3013 * Callback function called by the TCM core. Must not block since it can be
3014 * invoked in the context of the IB completion handler.
3015 */
3016static int srpt_queue_response(struct se_cmd *cmd)
3017{
3018 struct srpt_rdma_ch *ch;
3019 struct srpt_send_ioctx *ioctx;
3020 enum srpt_command_state state;
3021 unsigned long flags;
3022 int ret;
3023 enum dma_data_direction dir;
3024 int resp_len;
3025 u8 srp_tm_status;
3026
3027 ret = 0;
3028
3029 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3030 ch = ioctx->ch;
3031 BUG_ON(!ch);
3032
3033 spin_lock_irqsave(&ioctx->spinlock, flags);
3034 state = ioctx->state;
3035 switch (state) {
3036 case SRPT_STATE_NEW:
3037 case SRPT_STATE_DATA_IN:
3038 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
3039 break;
3040 case SRPT_STATE_MGMT:
3041 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
3042 break;
3043 default:
3044 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
3045 ch, ioctx->ioctx.index, ioctx->state);
3046 break;
3047 }
3048 spin_unlock_irqrestore(&ioctx->spinlock, flags);
3049
3050 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
3051 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
3052 atomic_inc(&ch->req_lim_delta);
3053 srpt_abort_cmd(ioctx);
3054 goto out;
3055 }
3056
3057 dir = ioctx->cmd.data_direction;
3058
3059 /* For read commands, transfer the data to the initiator. */
3060 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
3061 !ioctx->queue_status_only) {
3062 ret = srpt_xfer_data(ch, ioctx);
3063 if (ret) {
3064 printk(KERN_ERR "xfer_data failed for tag %llu\n",
3065 ioctx->tag);
3066 goto out;
3067 }
3068 }
3069
3070 if (state != SRPT_STATE_MGMT)
3071 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
3072 cmd->scsi_status);
3073 else {
3074 srp_tm_status
3075 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
3076 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
3077 ioctx->tag);
3078 }
3079 ret = srpt_post_send(ch, ioctx, resp_len);
3080 if (ret) {
3081 printk(KERN_ERR "sending cmd response failed for tag %llu\n",
3082 ioctx->tag);
3083 srpt_unmap_sg_to_ib_sge(ch, ioctx);
3084 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
3085 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
3086 }
3087
3088out:
3089 return ret;
3090}
3091
3092static int srpt_queue_status(struct se_cmd *cmd)
3093{
3094 struct srpt_send_ioctx *ioctx;
3095
3096 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3097 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
3098 if (cmd->se_cmd_flags &
3099 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
3100 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
3101 ioctx->queue_status_only = true;
3102 return srpt_queue_response(cmd);
3103}
3104
3105static void srpt_refresh_port_work(struct work_struct *work)
3106{
3107 struct srpt_port *sport = container_of(work, struct srpt_port, work);
3108
3109 srpt_refresh_port(sport);
3110}
3111
3112static int srpt_ch_list_empty(struct srpt_device *sdev)
3113{
3114 int res;
3115
3116 spin_lock_irq(&sdev->spinlock);
3117 res = list_empty(&sdev->rch_list);
3118 spin_unlock_irq(&sdev->spinlock);
3119
3120 return res;
3121}
3122
3123/**
3124 * srpt_release_sdev() - Free the channel resources associated with a target.
3125 */
3126static int srpt_release_sdev(struct srpt_device *sdev)
3127{
3128 struct srpt_rdma_ch *ch, *tmp_ch;
3129 int res;
3130
3131 WARN_ON_ONCE(irqs_disabled());
3132
3133 BUG_ON(!sdev);
3134
3135 spin_lock_irq(&sdev->spinlock);
3136 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
3137 __srpt_close_ch(ch);
3138 spin_unlock_irq(&sdev->spinlock);
3139
3140 res = wait_event_interruptible(sdev->ch_releaseQ,
3141 srpt_ch_list_empty(sdev));
3142 if (res)
3143 printk(KERN_ERR "%s: interrupted.\n", __func__);
3144
3145 return 0;
3146}
3147
3148static struct srpt_port *__srpt_lookup_port(const char *name)
3149{
3150 struct ib_device *dev;
3151 struct srpt_device *sdev;
3152 struct srpt_port *sport;
3153 int i;
3154
3155 list_for_each_entry(sdev, &srpt_dev_list, list) {
3156 dev = sdev->device;
3157 if (!dev)
3158 continue;
3159
3160 for (i = 0; i < dev->phys_port_cnt; i++) {
3161 sport = &sdev->port[i];
3162
3163 if (!strcmp(sport->port_guid, name))
3164 return sport;
3165 }
3166 }
3167
3168 return NULL;
3169}
3170
3171static struct srpt_port *srpt_lookup_port(const char *name)
3172{
3173 struct srpt_port *sport;
3174
3175 spin_lock(&srpt_dev_lock);
3176 sport = __srpt_lookup_port(name);
3177 spin_unlock(&srpt_dev_lock);
3178
3179 return sport;
3180}
3181
3182/**
3183 * srpt_add_one() - Infiniband device addition callback function.
3184 */
3185static void srpt_add_one(struct ib_device *device)
3186{
3187 struct srpt_device *sdev;
3188 struct srpt_port *sport;
3189 struct ib_srq_init_attr srq_attr;
3190 int i;
3191
3192 pr_debug("device = %p, device->dma_ops = %p\n", device,
3193 device->dma_ops);
3194
3195 sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
3196 if (!sdev)
3197 goto err;
3198
3199 sdev->device = device;
3200 INIT_LIST_HEAD(&sdev->rch_list);
3201 init_waitqueue_head(&sdev->ch_releaseQ);
3202 spin_lock_init(&sdev->spinlock);
3203
3204 if (ib_query_device(device, &sdev->dev_attr))
3205 goto free_dev;
3206
3207 sdev->pd = ib_alloc_pd(device);
3208 if (IS_ERR(sdev->pd))
3209 goto free_dev;
3210
3211 sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
3212 if (IS_ERR(sdev->mr))
3213 goto err_pd;
3214
3215 sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
3216
3217 srq_attr.event_handler = srpt_srq_event;
3218 srq_attr.srq_context = (void *)sdev;
3219 srq_attr.attr.max_wr = sdev->srq_size;
3220 srq_attr.attr.max_sge = 1;
3221 srq_attr.attr.srq_limit = 0;
3222
3223 sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
3224 if (IS_ERR(sdev->srq))
3225 goto err_mr;
3226
3227 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
3228 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
3229 device->name);
3230
3231 if (!srpt_service_guid)
3232 srpt_service_guid = be64_to_cpu(device->node_guid);
3233
3234 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3235 if (IS_ERR(sdev->cm_id))
3236 goto err_srq;
3237
3238 /* print out target login information */
3239 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
3240 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
3241 srpt_service_guid, srpt_service_guid);
3242
3243 /*
3244	 * We do not have a consistent service_id (i.e. also id_ext of target_id)
3245	 * to identify this target. We currently use the GUID of the first HCA
3246	 * in the system as service_id; therefore, the target_id will change
3247	 * if this HCA goes bad and is replaced by a different HCA.
3248 */
3249 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
3250 goto err_cm;
3251
3252 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3253 srpt_event_handler);
3254 if (ib_register_event_handler(&sdev->event_handler))
3255 goto err_cm;
3256
3257 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3258 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3259 sizeof(*sdev->ioctx_ring[0]),
3260 srp_max_req_size, DMA_FROM_DEVICE);
3261 if (!sdev->ioctx_ring)
3262 goto err_event;
3263
3264 for (i = 0; i < sdev->srq_size; ++i)
3265 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
3266
3267 WARN_ON(sdev->device->phys_port_cnt
3268 > sizeof(sdev->port)/sizeof(sdev->port[0]));
3269
3270 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3271 sport = &sdev->port[i - 1];
3272 sport->sdev = sdev;
3273 sport->port = i;
3274 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3275 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3276 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3277 INIT_WORK(&sport->work, srpt_refresh_port_work);
3278 INIT_LIST_HEAD(&sport->port_acl_list);
3279 spin_lock_init(&sport->port_acl_lock);
3280
3281 if (srpt_refresh_port(sport)) {
3282 printk(KERN_ERR "MAD registration failed for %s-%d.\n",
3283 srpt_sdev_name(sdev), i);
3284 goto err_ring;
3285 }
3286 snprintf(sport->port_guid, sizeof(sport->port_guid),
3287 "0x%016llx%016llx",
3288 be64_to_cpu(sport->gid.global.subnet_prefix),
3289 be64_to_cpu(sport->gid.global.interface_id));
3290 }
3291
3292 spin_lock(&srpt_dev_lock);
3293 list_add_tail(&sdev->list, &srpt_dev_list);
3294 spin_unlock(&srpt_dev_lock);
3295
3296out:
3297 ib_set_client_data(device, &srpt_client, sdev);
3298 pr_debug("added %s.\n", device->name);
3299 return;
3300
3301err_ring:
3302 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3303 sdev->srq_size, srp_max_req_size,
3304 DMA_FROM_DEVICE);
3305err_event:
3306 ib_unregister_event_handler(&sdev->event_handler);
3307err_cm:
3308 ib_destroy_cm_id(sdev->cm_id);
3309err_srq:
3310 ib_destroy_srq(sdev->srq);
3311err_mr:
3312 ib_dereg_mr(sdev->mr);
3313err_pd:
3314 ib_dealloc_pd(sdev->pd);
3315free_dev:
3316 kfree(sdev);
3317err:
3318 sdev = NULL;
3319 printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
3320 goto out;
3321}
3322
3323/**
3324 * srpt_remove_one() - InfiniBand device removal callback function.
3325 */
3326static void srpt_remove_one(struct ib_device *device)
3327{
3328 struct srpt_device *sdev;
3329 int i;
3330
3331 sdev = ib_get_client_data(device, &srpt_client);
3332 if (!sdev) {
3333 printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
3334 device->name);
3335 return;
3336 }
3337
3338 srpt_unregister_mad_agent(sdev);
3339
3340 ib_unregister_event_handler(&sdev->event_handler);
3341
3342 /* Cancel any work queued by the just unregistered IB event handler. */
3343 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3344 cancel_work_sync(&sdev->port[i].work);
3345
3346 ib_destroy_cm_id(sdev->cm_id);
3347
3348 /*
3349 * Unregistering a target must happen after destroying sdev->cm_id
3350 * such that no new SRP_LOGIN_REQ information units can arrive while
3351 * destroying the target.
3352 */
3353 spin_lock(&srpt_dev_lock);
3354 list_del(&sdev->list);
3355 spin_unlock(&srpt_dev_lock);
3356 srpt_release_sdev(sdev);
3357
3358 ib_destroy_srq(sdev->srq);
3359 ib_dereg_mr(sdev->mr);
3360 ib_dealloc_pd(sdev->pd);
3361
3362 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3363 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
3364 sdev->ioctx_ring = NULL;
3365 kfree(sdev);
3366}
3367
3368static struct ib_client srpt_client = {
3369 .name = DRV_NAME,
3370 .add = srpt_add_one,
3371 .remove = srpt_remove_one
3372};
3373
3374static int srpt_check_true(struct se_portal_group *se_tpg)
3375{
3376 return 1;
3377}
3378
3379static int srpt_check_false(struct se_portal_group *se_tpg)
3380{
3381 return 0;
3382}
3383
3384static char *srpt_get_fabric_name(void)
3385{
3386 return "srpt";
3387}
3388
3389static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
3390{
3391 return SCSI_TRANSPORTID_PROTOCOLID_SRP;
3392}
3393
3394static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3395{
3396 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3397
3398 return sport->port_guid;
3399}
3400
3401static u16 srpt_get_tag(struct se_portal_group *tpg)
3402{
3403 return 1;
3404}
3405
3406static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
3407{
3408 return 1;
3409}
3410
3411static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
3412 struct se_node_acl *se_nacl,
3413 struct t10_pr_registration *pr_reg,
3414 int *format_code, unsigned char *buf)
3415{
3416 struct srpt_node_acl *nacl;
3417 struct spc_rdma_transport_id *tr_id;
3418
3419 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3420 tr_id = (void *)buf;
3421 tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
3422 memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
3423 return sizeof(*tr_id);
3424}
3425
3426static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
3427 struct se_node_acl *se_nacl,
3428 struct t10_pr_registration *pr_reg,
3429 int *format_code)
3430{
3431 *format_code = 0;
3432 return sizeof(struct spc_rdma_transport_id);
3433}
3434
3435static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
3436 const char *buf, u32 *out_tid_len,
3437 char **port_nexus_ptr)
3438{
3439 struct spc_rdma_transport_id *tr_id;
3440
3441 *port_nexus_ptr = NULL;
3442 *out_tid_len = sizeof(struct spc_rdma_transport_id);
3443 tr_id = (void *)buf;
3444 return (char *)tr_id->i_port_id;
3445}
3446
3447static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
3448{
3449 struct srpt_node_acl *nacl;
3450
3451 nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
3452 if (!nacl) {
3453		printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
3454 return NULL;
3455 }
3456
3457 return &nacl->nacl;
3458}
3459
3460static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
3461 struct se_node_acl *se_nacl)
3462{
3463 struct srpt_node_acl *nacl;
3464
3465 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3466 kfree(nacl);
3467}
3468
3469static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3470{
3471 return 1;
3472}
3473
3474static void srpt_release_cmd(struct se_cmd *se_cmd)
3475{
3476}
3477
3478/**
3479 * srpt_shutdown_session() - Whether or not a session may be shut down.
3480 */
3481static int srpt_shutdown_session(struct se_session *se_sess)
3482{
3483 return true;
3484}
3485
3486/**
3487 * srpt_close_session() - Forcibly close a session.
3488 *
3489 * Callback function invoked by the TCM core to clean up sessions associated
3490 * with a node ACL when the user invokes
3491 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3492 */
3493static void srpt_close_session(struct se_session *se_sess)
3494{
3495 DECLARE_COMPLETION_ONSTACK(release_done);
3496 struct srpt_rdma_ch *ch;
3497 struct srpt_device *sdev;
3498 int res;
3499
3500 ch = se_sess->fabric_sess_ptr;
3501 WARN_ON(ch->sess != se_sess);
3502
3503 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
3504
3505 sdev = ch->sport->sdev;
3506 spin_lock_irq(&sdev->spinlock);
3507 BUG_ON(ch->release_done);
3508 ch->release_done = &release_done;
3509 __srpt_close_ch(ch);
3510 spin_unlock_irq(&sdev->spinlock);
3511
3512 res = wait_for_completion_timeout(&release_done, 60 * HZ);
3513 WARN_ON(res <= 0);
3514}
3515
3516/**
3517 * To do: Find out whether stop_session() has a meaning for transports
3518 * other than iSCSI.
3519 */
3520static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
3521 int conn_sleep)
3522{
3523}
3524
3525static void srpt_reset_nexus(struct se_session *sess)
3526{
3527 printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
3528}
3529
3530static int srpt_sess_logged_in(struct se_session *se_sess)
3531{
3532 return true;
3533}
3534
3535/**
3536 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
3537 *
3538 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3539 * This object represents an arbitrary integer used to uniquely identify a
3540 * particular attached remote initiator port to a particular SCSI target port
3541 * within a particular SCSI target device within a particular SCSI instance.
3542 */
3543static u32 srpt_sess_get_index(struct se_session *se_sess)
3544{
3545 return 0;
3546}
3547
3548static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3549{
3550}
3551
3552static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
3553{
3554 struct srpt_send_ioctx *ioctx;
3555
3556 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3557 return ioctx->tag;
3558}
3559
3560/* Note: only used from inside debug printk's by the TCM core. */
3561static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3562{
3563 struct srpt_send_ioctx *ioctx;
3564
3565 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3566 return srpt_get_cmd_state(ioctx);
3567}
3568
3569static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
3570{
3571 return 0;
3572}
3573
3574static u16 srpt_get_fabric_sense_len(void)
3575{
3576 return 0;
3577}
3578
3579static int srpt_is_state_remove(struct se_cmd *se_cmd)
3580{
3581 return 0;
3582}
3583
3584/**
3585 * srpt_parse_i_port_id() - Parse an initiator port ID.
3586 * @name: ASCII representation of a 128-bit initiator port ID.
3587 * @i_port_id: Binary 128-bit port ID.
3588 */
3589static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3590{
3591 const char *p;
3592 unsigned len, count, leading_zero_bytes;
3593 int ret, rc;
3594
3595 p = name;
3596 if (strnicmp(p, "0x", 2) == 0)
3597 p += 2;
3598 ret = -EINVAL;
3599 len = strlen(p);
3600 if (len % 2)
3601 goto out;
3602 count = min(len / 2, 16U);
3603 leading_zero_bytes = 16 - count;
3604 memset(i_port_id, 0, leading_zero_bytes);
3605 rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
3606 if (rc < 0)
3607 pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
3608 ret = 0;
3609out:
3610 return ret;
3611}
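/*
 * Illustration added by the editor (hypothetical values, not part of the
 * driver): the name is the ASCII hex form of the 128-bit initiator port ID,
 * optionally prefixed with "0x". Strings shorter than 32 hex digits are
 * zero-padded on the left, and an odd number of hex digits is rejected with
 * -EINVAL:
 *
 *	u8 i_port_id[16];
 *
 *	srpt_parse_i_port_id(i_port_id, "0x00000000000000002c903000f4b60421");
 */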
3612
3613/*
3614 * configfs callback function invoked for
3615 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3616 */
3617static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
3618 struct config_group *group,
3619 const char *name)
3620{
3621 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3622 struct se_node_acl *se_nacl, *se_nacl_new;
3623 struct srpt_node_acl *nacl;
3624 int ret = 0;
3625 u32 nexus_depth = 1;
3626 u8 i_port_id[16];
3627
3628 if (srpt_parse_i_port_id(i_port_id, name) < 0) {
3629 printk(KERN_ERR "invalid initiator port ID %s\n", name);
3630 ret = -EINVAL;
3631 goto err;
3632 }
3633
3634 se_nacl_new = srpt_alloc_fabric_acl(tpg);
3635 if (!se_nacl_new) {
3636 ret = -ENOMEM;
3637 goto err;
3638 }
3639 /*
3640 * nacl_new may be released by core_tpg_add_initiator_node_acl()
3641	 * when converting a node ACL from demo mode to explicit
3642 */
3643 se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
3644 nexus_depth);
3645 if (IS_ERR(se_nacl)) {
3646 ret = PTR_ERR(se_nacl);
3647 goto err;
3648 }
3649 /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
3650 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3651 memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
3652 nacl->sport = sport;
3653
3654 spin_lock_irq(&sport->port_acl_lock);
3655 list_add_tail(&nacl->list, &sport->port_acl_list);
3656 spin_unlock_irq(&sport->port_acl_lock);
3657
3658 return se_nacl;
3659err:
3660 return ERR_PTR(ret);
3661}
3662
3663/*
3664 * configfs callback function invoked for
3665 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3666 */
3667static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
3668{
3669 struct srpt_node_acl *nacl;
3670 struct srpt_device *sdev;
3671 struct srpt_port *sport;
3672
3673 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3674 sport = nacl->sport;
3675 sdev = sport->sdev;
3676 spin_lock_irq(&sport->port_acl_lock);
3677 list_del(&nacl->list);
3678 spin_unlock_irq(&sport->port_acl_lock);
3679 core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
3680 srpt_release_fabric_acl(NULL, se_nacl);
3681}
3682
3683static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
3684 struct se_portal_group *se_tpg,
3685 char *page)
3686{
3687 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3688
3689 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3690}
3691
3692static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
3693 struct se_portal_group *se_tpg,
3694 const char *page,
3695 size_t count)
3696{
3697 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3698 unsigned long val;
3699 int ret;
3700
3701 ret = strict_strtoul(page, 0, &val);
3702 if (ret < 0) {
3703 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3704 return -EINVAL;
3705 }
3706 if (val > MAX_SRPT_RDMA_SIZE) {
3707 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3708 MAX_SRPT_RDMA_SIZE);
3709 return -EINVAL;
3710 }
3711 if (val < DEFAULT_MAX_RDMA_SIZE) {
3712 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3713 val, DEFAULT_MAX_RDMA_SIZE);
3714 return -EINVAL;
3715 }
3716 sport->port_attrib.srp_max_rdma_size = val;
3717
3718 return count;
3719}
3720
3721TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
3722
3723static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
3724 struct se_portal_group *se_tpg,
3725 char *page)
3726{
3727 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3728
3729 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3730}
3731
3732static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
3733 struct se_portal_group *se_tpg,
3734 const char *page,
3735 size_t count)
3736{
3737 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3738 unsigned long val;
3739 int ret;
3740
3741 ret = strict_strtoul(page, 0, &val);
3742 if (ret < 0) {
3743 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3744 return -EINVAL;
3745 }
3746 if (val > MAX_SRPT_RSP_SIZE) {
3747 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3748 MAX_SRPT_RSP_SIZE);
3749 return -EINVAL;
3750 }
3751 if (val < MIN_MAX_RSP_SIZE) {
3752 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3753 MIN_MAX_RSP_SIZE);
3754 return -EINVAL;
3755 }
3756 sport->port_attrib.srp_max_rsp_size = val;
3757
3758 return count;
3759}
3760
3761TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
3762
3763static ssize_t srpt_tpg_attrib_show_srp_sq_size(
3764 struct se_portal_group *se_tpg,
3765 char *page)
3766{
3767 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3768
3769 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3770}
3771
3772static ssize_t srpt_tpg_attrib_store_srp_sq_size(
3773 struct se_portal_group *se_tpg,
3774 const char *page,
3775 size_t count)
3776{
3777 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3778 unsigned long val;
3779 int ret;
3780
3781 ret = strict_strtoul(page, 0, &val);
3782 if (ret < 0) {
3783 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3784 return -EINVAL;
3785 }
3786 if (val > MAX_SRPT_SRQ_SIZE) {
3787 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3788 MAX_SRPT_SRQ_SIZE);
3789 return -EINVAL;
3790 }
3791 if (val < MIN_SRPT_SRQ_SIZE) {
3792 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3793 MIN_SRPT_SRQ_SIZE);
3794 return -EINVAL;
3795 }
3796 sport->port_attrib.srp_sq_size = val;
3797
3798 return count;
3799}
3800
3801TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
3802
3803static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3804 &srpt_tpg_attrib_srp_max_rdma_size.attr,
3805 &srpt_tpg_attrib_srp_max_rsp_size.attr,
3806 &srpt_tpg_attrib_srp_sq_size.attr,
3807 NULL,
3808};
3809
3810static ssize_t srpt_tpg_show_enable(
3811 struct se_portal_group *se_tpg,
3812 char *page)
3813{
3814 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3815
3816 return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
3817}
3818
3819static ssize_t srpt_tpg_store_enable(
3820 struct se_portal_group *se_tpg,
3821 const char *page,
3822 size_t count)
3823{
3824 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3825 unsigned long tmp;
3826 int ret;
3827
3828 ret = strict_strtoul(page, 0, &tmp);
3829 if (ret < 0) {
3830 printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
3831 return -EINVAL;
3832 }
3833
3834 if ((tmp != 0) && (tmp != 1)) {
3835 printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3836 return -EINVAL;
3837 }
3838 if (tmp == 1)
3839 sport->enabled = true;
3840 else
3841 sport->enabled = false;
3842
3843 return count;
3844}
3845
3846TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
3847
3848static struct configfs_attribute *srpt_tpg_attrs[] = {
3849 &srpt_tpg_enable.attr,
3850 NULL,
3851};
3852
3853/**
3854 * configfs callback invoked for
3855 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3856 */
3857static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3858 struct config_group *group,
3859 const char *name)
3860{
3861 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3862 int res;
3863
3864 /* Initialize sport->port_wwn and sport->port_tpg_1 */
3865 res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
3866 &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
3867 if (res)
3868 return ERR_PTR(res);
3869
3870 return &sport->port_tpg_1;
3871}
3872
3873/**
3874 * configfs callback invoked for
3875 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3876 */
3877static void srpt_drop_tpg(struct se_portal_group *tpg)
3878{
3879 struct srpt_port *sport = container_of(tpg,
3880 struct srpt_port, port_tpg_1);
3881
3882 sport->enabled = false;
3883 core_tpg_deregister(&sport->port_tpg_1);
3884}
3885
3886/**
3887 * configfs callback invoked for
3888 * mkdir /sys/kernel/config/target/$driver/$port
3889 */
3890static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3891 struct config_group *group,
3892 const char *name)
3893{
3894 struct srpt_port *sport;
3895 int ret;
3896
3897 sport = srpt_lookup_port(name);
3898 pr_debug("make_tport(%s)\n", name);
3899 ret = -EINVAL;
3900 if (!sport)
3901 goto err;
3902
3903 return &sport->port_wwn;
3904
3905err:
3906 return ERR_PTR(ret);
3907}
3908
3909/**
3910 * configfs callback invoked for
3911 * rmdir /sys/kernel/config/target/$driver/$port
3912 */
3913static void srpt_drop_tport(struct se_wwn *wwn)
3914{
3915 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3916
3917	pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3918}
3919
3920static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
3921 char *buf)
3922{
3923 return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3924}
3925
3926TF_WWN_ATTR_RO(srpt, version);
3927
3928static struct configfs_attribute *srpt_wwn_attrs[] = {
3929 &srpt_wwn_version.attr,
3930 NULL,
3931};
3932
3933static struct target_core_fabric_ops srpt_template = {
3934 .get_fabric_name = srpt_get_fabric_name,
3935 .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
3936 .tpg_get_wwn = srpt_get_fabric_wwn,
3937 .tpg_get_tag = srpt_get_tag,
3938 .tpg_get_default_depth = srpt_get_default_depth,
3939 .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
3940 .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
3941 .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
3942 .tpg_check_demo_mode = srpt_check_false,
3943 .tpg_check_demo_mode_cache = srpt_check_true,
3944 .tpg_check_demo_mode_write_protect = srpt_check_true,
3945 .tpg_check_prod_mode_write_protect = srpt_check_false,
3946 .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
3947 .tpg_release_fabric_acl = srpt_release_fabric_acl,
3948 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3949 .release_cmd = srpt_release_cmd,
3950 .check_stop_free = srpt_check_stop_free,
3951 .shutdown_session = srpt_shutdown_session,
3952 .close_session = srpt_close_session,
3953 .stop_session = srpt_stop_session,
3954 .fall_back_to_erl0 = srpt_reset_nexus,
3955 .sess_logged_in = srpt_sess_logged_in,
3956 .sess_get_index = srpt_sess_get_index,
3957 .sess_get_initiator_sid = NULL,
3958 .write_pending = srpt_write_pending,
3959 .write_pending_status = srpt_write_pending_status,
3960 .set_default_node_attributes = srpt_set_default_node_attrs,
3961 .get_task_tag = srpt_get_task_tag,
3962 .get_cmd_state = srpt_get_tcm_cmd_state,
3963 .queue_data_in = srpt_queue_response,
3964 .queue_status = srpt_queue_status,
3965 .queue_tm_rsp = srpt_queue_response,
3966 .get_fabric_sense_len = srpt_get_fabric_sense_len,
3967 .set_fabric_sense_len = srpt_set_fabric_sense_len,
3968 .is_state_remove = srpt_is_state_remove,
3969 /*
3970 * Setup function pointers for generic logic in
3971 * target_core_fabric_configfs.c
3972 */
3973 .fabric_make_wwn = srpt_make_tport,
3974 .fabric_drop_wwn = srpt_drop_tport,
3975 .fabric_make_tpg = srpt_make_tpg,
3976 .fabric_drop_tpg = srpt_drop_tpg,
3977 .fabric_post_link = NULL,
3978 .fabric_pre_unlink = NULL,
3979 .fabric_make_np = NULL,
3980 .fabric_drop_np = NULL,
3981 .fabric_make_nodeacl = srpt_make_nodeacl,
3982 .fabric_drop_nodeacl = srpt_drop_nodeacl,
3983};
3984
3985/**
3986 * srpt_init_module() - Kernel module initialization.
3987 *
3988 * Note: Since ib_register_client() registers callback functions, and since at
3989 * least one of these callback functions (srpt_add_one()) calls target core
3990 * functions, this driver must be registered with the target core before
3991 * ib_register_client() is called.
3992 */
3993static int __init srpt_init_module(void)
3994{
3995 int ret;
3996
3997 ret = -EINVAL;
3998 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3999 printk(KERN_ERR "invalid value %d for kernel module parameter"
4000 " srp_max_req_size -- must be at least %d.\n",
4001 srp_max_req_size, MIN_MAX_REQ_SIZE);
4002 goto out;
4003 }
4004
4005 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
4006 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
4007 printk(KERN_ERR "invalid value %d for kernel module parameter"
4008 " srpt_srq_size -- must be in the range [%d..%d].\n",
4009 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
4010 goto out;
4011 }
4012
4013 spin_lock_init(&srpt_dev_lock);
4014 INIT_LIST_HEAD(&srpt_dev_list);
4015
4016 ret = -ENODEV;
4017 srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
4018 if (!srpt_target) {
4019 printk(KERN_ERR "couldn't register\n");
4020 goto out;
4021 }
4022
4023 srpt_target->tf_ops = srpt_template;
4024
4025 /* Enable SG chaining */
4026 srpt_target->tf_ops.task_sg_chaining = true;
4027
4028 /*
4029 * Set up default attribute lists.
4030 */
4031 srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
4032 srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
4033 srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
4034 srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
4035 srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
4036 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
4037 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
4038 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
4039 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
4040
4041 ret = target_fabric_configfs_register(srpt_target);
4042 if (ret < 0) {
4043 printk(KERN_ERR "couldn't register\n");
4044 goto out_free_target;
4045 }
4046
4047 ret = ib_register_client(&srpt_client);
4048 if (ret) {
4049 printk(KERN_ERR "couldn't register IB client\n");
4050 goto out_unregister_target;
4051 }
4052
4053 return 0;
4054
4055out_unregister_target:
4056 target_fabric_configfs_deregister(srpt_target);
4057 srpt_target = NULL;
4058out_free_target:
4059 if (srpt_target)
4060 target_fabric_configfs_free(srpt_target);
4061out:
4062 return ret;
4063}
4064
4065static void __exit srpt_cleanup_module(void)
4066{
4067 ib_unregister_client(&srpt_client);
4068 target_fabric_configfs_deregister(srpt_target);
4069 srpt_target = NULL;
4070}
4071
4072module_init(srpt_init_module);
4073module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 000000000000..b4b4bbcd7f16
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,444 @@
1/*
2 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
3 * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#ifndef IB_SRPT_H
36#define IB_SRPT_H
37
38#include <linux/version.h>
39#include <linux/types.h>
40#include <linux/list.h>
41#include <linux/wait.h>
42
43#include <rdma/ib_verbs.h>
44#include <rdma/ib_sa.h>
45#include <rdma/ib_cm.h>
46
47#include <scsi/srp.h>
48
49#include "ib_dm_mad.h"
50
51/*
52 * The prefix the ServiceName field must start with in the device management
53 * ServiceEntries attribute pair. See also the SRP specification.
54 */
55#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
56
57enum {
58 /*
59 * SRP IOControllerProfile attributes for SRP target ports that have
60 * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
61 * in the SRP specification.
62 */
63 SRP_PROTOCOL = 0x0108,
64 SRP_PROTOCOL_VERSION = 0x0001,
65 SRP_IO_SUBCLASS = 0x609e,
66 SRP_SEND_TO_IOC = 0x01,
67 SRP_SEND_FROM_IOC = 0x02,
68 SRP_RDMA_READ_FROM_IOC = 0x08,
69 SRP_RDMA_WRITE_FROM_IOC = 0x20,
70
71 /*
72 * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
73 * specification.
74 */
75 SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
76 SRP_LOSOLNT = 0x10, /* logout solicited notification */
77 SRP_CRSOLNT = 0x20, /* credit request solicited notification */
78 SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
79
80 /*
81 * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
82 * 18 and 20 in the SRP specification.
83 */
84 SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
85 SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
86
87 /*
88 * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
89 * 16 and 22 in the SRP specification.
90 */
91 SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
92
93 /* See also table 24 in the SRP specification. */
94 SRP_TSK_MGMT_SUCCESS = 0x00,
95 SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
96 SRP_TSK_MGMT_FAILED = 0x05,
97
98 /* See also table 21 in the SRP specification. */
99 SRP_CMD_SIMPLE_Q = 0x0,
100 SRP_CMD_HEAD_OF_Q = 0x1,
101 SRP_CMD_ORDERED_Q = 0x2,
102 SRP_CMD_ACA = 0x4,
103
104 SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
105 SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
106 SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
107
108 SRPT_DEF_SG_TABLESIZE = 128,
109 SRPT_DEF_SG_PER_WQE = 16,
110
111 MIN_SRPT_SQ_SIZE = 16,
112 DEF_SRPT_SQ_SIZE = 4096,
113 SRPT_RQ_SIZE = 128,
114 MIN_SRPT_SRQ_SIZE = 4,
115 DEFAULT_SRPT_SRQ_SIZE = 4095,
116 MAX_SRPT_SRQ_SIZE = 65535,
117 MAX_SRPT_RDMA_SIZE = 1U << 24,
118 MAX_SRPT_RSP_SIZE = 1024,
119
120 MIN_MAX_REQ_SIZE = 996,
121 DEFAULT_MAX_REQ_SIZE
122 = sizeof(struct srp_cmd)/*48*/
123 + sizeof(struct srp_indirect_buf)/*20*/
124 + 128 * sizeof(struct srp_direct_buf)/*16*/,
125
126 MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
127 DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
128
129 DEFAULT_MAX_RDMA_SIZE = 65536,
130};
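/*
 * Editor's note: with the element sizes quoted in the comments above,
 * DEFAULT_MAX_REQ_SIZE works out to 48 + 20 + 128 * 16 = 2116 bytes and
 * MIN_MAX_RSP_SIZE to 36 + 4 = 40 bytes.
 */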
131
132enum srpt_opcode {
133 SRPT_RECV,
134 SRPT_SEND,
135 SRPT_RDMA_MID,
136 SRPT_RDMA_ABORT,
137 SRPT_RDMA_READ_LAST,
138 SRPT_RDMA_WRITE_LAST,
139};
140
141static inline u64 encode_wr_id(u8 opcode, u32 idx)
142{
143 return ((u64)opcode << 32) | idx;
144}
145static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
146{
147 return wr_id >> 32;
148}
149static inline u32 idx_from_wr_id(u64 wr_id)
150{
151 return (u32)wr_id;
152}
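/*
 * Editor's illustration (not part of the driver): encode_wr_id() stores the
 * srpt_opcode in the upper 32 bits of the 64-bit work request ID and the
 * ioctx ring index in the lower 32 bits, so a wr_id round-trips as follows:
 *
 *	u64 wr_id = encode_wr_id(SRPT_RDMA_READ_LAST, 42);
 *
 *	WARN_ON(opcode_from_wr_id(wr_id) != SRPT_RDMA_READ_LAST);
 *	WARN_ON(idx_from_wr_id(wr_id) != 42);
 */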
153
154struct rdma_iu {
155 u64 raddr;
156 u32 rkey;
157 struct ib_sge *sge;
158 u32 sge_cnt;
159 int mem_id;
160};
161
162/**
163 * enum srpt_command_state - SCSI command state managed by SRPT.
164 * @SRPT_STATE_NEW: New command arrived and is being processed.
165 * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
166 * for data arrival.
167 * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
168 * being processed.
169 * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
170 * @SRPT_STATE_MGMT: Processing a SCSI task management command.
171 * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
172 * @SRPT_STATE_DONE: Command processing finished successfully, command
173 * processing has been aborted or command processing
174 * failed.
175 */
176enum srpt_command_state {
177 SRPT_STATE_NEW = 0,
178 SRPT_STATE_NEED_DATA = 1,
179 SRPT_STATE_DATA_IN = 2,
180 SRPT_STATE_CMD_RSP_SENT = 3,
181 SRPT_STATE_MGMT = 4,
182 SRPT_STATE_MGMT_RSP_SENT = 5,
183 SRPT_STATE_DONE = 6,
184};
185
186/**
187 * struct srpt_ioctx - Shared SRPT I/O context information.
188 * @buf: Pointer to the buffer.
189 * @dma: DMA address of the buffer.
190 * @index: Index of the I/O context in its ioctx_ring array.
191 */
192struct srpt_ioctx {
193 void *buf;
194 dma_addr_t dma;
195 uint32_t index;
196};
197
198/**
199 * struct srpt_recv_ioctx - SRPT receive I/O context.
200 * @ioctx: See above.
201 * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
202 */
203struct srpt_recv_ioctx {
204 struct srpt_ioctx ioctx;
205 struct list_head wait_list;
206};
207
208/**
209 * struct srpt_send_ioctx - SRPT send I/O context.
210 * @ioctx: See above.
211 * @ch: Channel pointer.
212 * @free_list: Node in srpt_rdma_ch.free_list.
213 * @n_rbuf: Number of data buffers in the received SRP command.
214 * @rbufs: Pointer to SRP data buffer array.
215 * @single_rbuf: SRP data buffer if the command has only a single buffer.
216 * @sg: Pointer to sg-list associated with this I/O context.
217 * @sg_cnt: SG-list size.
218 * @mapped_sg_count: ib_dma_map_sg() return value.
219 * @n_rdma_ius: Number of elements in the rdma_ius array.
220 * @rdma_ius: Array with information about the RDMA mapping.
221 * @tag: Tag of the received SRP information unit.
222 * @spinlock: Protects 'state'.
223 * @state: I/O context state.
224 * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
225 * the already initiated transfers have finished.
226 * @cmd: Target core command data structure.
227 * @sense_data: SCSI sense data.
228 */
229struct srpt_send_ioctx {
230 struct srpt_ioctx ioctx;
231 struct srpt_rdma_ch *ch;
232 struct kref kref;
233 struct rdma_iu *rdma_ius;
234 struct srp_direct_buf *rbufs;
235 struct srp_direct_buf single_rbuf;
236 struct scatterlist *sg;
237 struct list_head free_list;
238 spinlock_t spinlock;
239 enum srpt_command_state state;
240 bool rdma_aborted;
241 struct se_cmd cmd;
242 struct completion tx_done;
243 u64 tag;
244 int sg_cnt;
245 int mapped_sg_count;
246 u16 n_rdma_ius;
247 u8 n_rdma;
248 u8 n_rbuf;
249 bool queue_status_only;
250 u8 sense_data[SCSI_SENSE_BUFFERSIZE];
251};
252
253/**
254 * enum rdma_ch_state - SRP channel state.
255 * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
256 * @CH_LIVE: QP is in RTS state.
257 * @CH_DISCONNECTING: DREQ has been received and the target is waiting for
258 * DREP, or DREQ has been sent and the target is waiting
259 * for DREP.
260 * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
261 * @CH_RELEASING: Last WQE event has been received; releasing resources.
262 */
263enum rdma_ch_state {
264 CH_CONNECTING,
265 CH_LIVE,
266 CH_DISCONNECTING,
267 CH_DRAINING,
268 CH_RELEASING
269};
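/*
 * Editor's note: read together, the states above describe the nominal channel
 * lifetime: CH_CONNECTING (QP in RTR, waiting for RTU) -> CH_LIVE (QP in RTS)
 * -> CH_DISCONNECTING (DREQ exchanged) -> CH_DRAINING (QP in error, waiting
 * for the last WQE event) -> CH_RELEASING (resources being freed).
 */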
270
271/**
272 * struct srpt_rdma_ch - RDMA channel.
273 * @wait_queue: Allows the kernel thread to wait for more work.
274 * @thread: Kernel thread that processes the IB queues associated with
275 * the channel.
276 * @cm_id: IB CM ID associated with the channel.
277 * @qp: IB queue pair used for communicating over this channel.
278 * @cq: IB completion queue for this channel.
279 * @rq_size: IB receive queue size.
280 * @rsp_size: IB response message size in bytes.
281 * @sq_wr_avail: number of work requests available in the send queue.
282 * @sport: pointer to the information of the HCA port used by this
283 * channel.
284 * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
285 * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
286 * @max_ti_iu_len: maximum target-to-initiator information unit length.
287 * @req_lim: request limit: maximum number of requests that may be sent
288 * by the initiator without having received a response.
289 * @req_lim_delta: Number of credits not yet sent back to the initiator.
290 * @spinlock: Protects free_list and state.
291 * @free_list: Head of list with free send I/O contexts.
292 * @state: channel state. See also enum rdma_ch_state.
293 * @ioctx_ring: Send ring.
294 * @wc: IB work completion array for srpt_process_completion().
295 * @list: Node for insertion in the srpt_device.rch_list list.
296 * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
297 * list contains struct srpt_ioctx elements and is protected
298 * against concurrent modification by the cm_id spinlock.
299 * @sess: Session information associated with this SRP channel.
300 * @sess_name: Session name.
301 * @release_work: Allows scheduling of srpt_release_channel().
302 * @release_done: Enables waiting for srpt_release_channel() completion.
303 */
304struct srpt_rdma_ch {
305 wait_queue_head_t wait_queue;
306 struct task_struct *thread;
307 struct ib_cm_id *cm_id;
308 struct ib_qp *qp;
309 struct ib_cq *cq;
310 int rq_size;
311 u32 rsp_size;
312 atomic_t sq_wr_avail;
313 struct srpt_port *sport;
314 u8 i_port_id[16];
315 u8 t_port_id[16];
316 int max_ti_iu_len;
317 atomic_t req_lim;
318 atomic_t req_lim_delta;
319 spinlock_t spinlock;
320 struct list_head free_list;
321 enum rdma_ch_state state;
322 struct srpt_send_ioctx **ioctx_ring;
323 struct ib_wc wc[16];
324 struct list_head list;
325 struct list_head cmd_wait_list;
326 struct se_session *sess;
327 u8 sess_name[36];
328 struct work_struct release_work;
329 struct completion *release_done;
330};
331
332/**
333 * struct srpt_port_attrib - Attributes for SRPT port
334 * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
335 * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
336 * @srp_sq_size: Shared receive queue (SRQ) size.
337 */
338struct srpt_port_attrib {
339 u32 srp_max_rdma_size;
340 u32 srp_max_rsp_size;
341 u32 srp_sq_size;
342};
343
344/**
345 * struct srpt_port - Information associated by SRPT with a single IB port.
346 * @sdev: backpointer to the HCA information.
347 * @mad_agent: per-port management datagram processing information.
348 * @enabled: Whether or not this target port is enabled.
349 * @port_guid: ASCII representation of Port GUID
350 * @port: one-based port number.
351 * @sm_lid: cached value of the port's sm_lid.
352 * @lid: cached value of the port's lid.
353 * @gid: cached value of the port's gid.
354 * @port_acl_lock: Spinlock for port_acl_list.
355 * @work: work structure for refreshing the aforementioned cached values.
356 * @port_tpg_1: Target portal group = 1 data.
357 * @port_wwn: Target core WWN data.
358 * @port_acl_list: Head of the list with all node ACLs for this port.
359 */
360struct srpt_port {
361 struct srpt_device *sdev;
362 struct ib_mad_agent *mad_agent;
363 bool enabled;
364 u8 port_guid[64];
365 u8 port;
366 u16 sm_lid;
367 u16 lid;
368 union ib_gid gid;
369 spinlock_t port_acl_lock;
370 struct work_struct work;
371 struct se_portal_group port_tpg_1;
372 struct se_wwn port_wwn;
373 struct list_head port_acl_list;
374 struct srpt_port_attrib port_attrib;
375};
376
377/**
378 * struct srpt_device - Information associated by SRPT with a single HCA.
379 * @device: Backpointer to the struct ib_device managed by the IB core.
380 * @pd: IB protection domain.
381 * @mr: L_Key (local key) with write access to all local memory.
382 * @srq: Per-HCA SRQ (shared receive queue).
383 * @cm_id: Connection identifier.
384 * @dev_attr: Attributes of the InfiniBand device as obtained during the
385 * ib_client.add() callback.
386 * @srq_size: SRQ size.
387 * @ioctx_ring: Receive I/O context ring associated with the per-HCA SRQ.
388 * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
389 * @ch_releaseQ: Enables waiting for removal from rch_list.
390 * @spinlock: Protects rch_list and tpg.
391 * @port: Information about the ports owned by this HCA.
392 * @event_handler: Per-HCA asynchronous IB event handler.
393 * @list: Node in srpt_dev_list.
394 */
395struct srpt_device {
396 struct ib_device *device;
397 struct ib_pd *pd;
398 struct ib_mr *mr;
399 struct ib_srq *srq;
400 struct ib_cm_id *cm_id;
401 struct ib_device_attr dev_attr;
402 int srq_size;
403 struct srpt_recv_ioctx **ioctx_ring;
404 struct list_head rch_list;
405 wait_queue_head_t ch_releaseQ;
406 spinlock_t spinlock;
407 struct srpt_port port[2];
408 struct ib_event_handler event_handler;
409 struct list_head list;
410};
411
412/**
413 * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
414 * @i_port_id: 128-bit SRP initiator port ID.
415 * @sport: port information.
416 * @nacl: Target core node ACL information.
417 * @list: Element of the per-HCA ACL list.
418 */
419struct srpt_node_acl {
420 u8 i_port_id[16];
421 struct srpt_port *sport;
422 struct se_node_acl nacl;
423 struct list_head list;
424};
425
426/*
427 * SRP-related SCSI persistent reservation definitions.
428 *
429 * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
430 * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
431 * SCSI over an RDMA interface).
432 */
433
434enum {
435 SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
436};
437
438struct spc_rdma_transport_id {
439 uint8_t protocol_identifier;
440 uint8_t reserved[7];
441 uint8_t i_port_id[16];
442};
443
444#endif /* IB_SRPT_H */
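Note: the TransportID layout above follows SPC-4 for SRP initiator ports: a protocol identifier of 4, seven reserved bytes, then the 128-bit initiator port ID. A minimal sketch of how such a descriptor could be populated from a channel's i_port_id follows; the helper name is illustrative only and not part of this patch (assumes <linux/string.h> for memset/memcpy).

/* Illustrative only: build an SPC-4 SRP TransportID from an initiator
 * port ID as carried in struct srpt_rdma_ch.i_port_id. */
static void srpt_fill_transport_id_example(struct spc_rdma_transport_id *tid,
					   const u8 i_port_id[16])
{
	memset(tid, 0, sizeof(*tid));
	tid->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
	memcpy(tid->i_port_id, i_port_id, sizeof(tid->i_port_id));
}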
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index c957c344233f..9ca28fced2b9 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -403,6 +403,13 @@ config LEDS_MAX8997
403 This option enables support for on-chip LED drivers on 403 This option enables support for on-chip LED drivers on
404 MAXIM MAX8997 PMIC. 404 MAXIM MAX8997 PMIC.
405 405
406config LEDS_OT200
407 tristate "LED support for the Bachmann OT200"
408 depends on LEDS_CLASS && HAS_IOMEM
409 help
410 This option enables support for the LEDs on the Bachmann OT200.
411 Say Y to enable LEDs on the Bachmann OT200.
412
406config LEDS_TRIGGERS 413config LEDS_TRIGGERS
407 bool "LED Trigger support" 414 bool "LED Trigger support"
408 depends on LEDS_CLASS 415 depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index b8a9723477f0..1fc6875a8b20 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
28obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o 28obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
29obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 29obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
30obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 30obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
31obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
31obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 32obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
32obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o 33obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
33obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o 34obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
new file mode 100644
index 000000000000..c4646825a620
--- /dev/null
+++ b/drivers/leds/leds-ot200.c
@@ -0,0 +1,171 @@
1/*
2 * Bachmann ot200 leds driver.
3 *
4 * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
5 * Christian Gmeiner <christian.gmeiner@gmail.com>
6 *
7 * License: GPL as published by the FSF.
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/platform_device.h>
13#include <linux/slab.h>
14#include <linux/leds.h>
15#include <linux/io.h>
16#include <linux/module.h>
17
18
19struct ot200_led {
20 struct led_classdev cdev;
21 const char *name;
22 unsigned long port;
23 u8 mask;
24};
25
26/*
27 * The device has three leds on the back panel (led_err, led_init and led_run)
28 * and can handle up to seven leds on the front panel.
29 */
30
31static struct ot200_led leds[] = {
32 {
33 .name = "led_run",
34 .port = 0x5a,
35 .mask = BIT(0),
36 },
37 {
38 .name = "led_init",
39 .port = 0x5a,
40 .mask = BIT(1),
41 },
42 {
43 .name = "led_err",
44 .port = 0x5a,
45 .mask = BIT(2),
46 },
47 {
48 .name = "led_1",
49 .port = 0x49,
50 .mask = BIT(7),
51 },
52 {
53 .name = "led_2",
54 .port = 0x49,
55 .mask = BIT(6),
56 },
57 {
58 .name = "led_3",
59 .port = 0x49,
60 .mask = BIT(5),
61 },
62 {
63 .name = "led_4",
64 .port = 0x49,
65 .mask = BIT(4),
66 },
67 {
68 .name = "led_5",
69 .port = 0x49,
70 .mask = BIT(3),
71 },
72 {
73 .name = "led_6",
74 .port = 0x49,
75 .mask = BIT(2),
76 },
77 {
78 .name = "led_7",
79 .port = 0x49,
80 .mask = BIT(1),
81 }
82};
83
84static DEFINE_SPINLOCK(value_lock);
85
86/*
87 * we need to store the current led states, as it is not
88 * possible to read the current led state via inb().
89 */
90static u8 leds_back;
91static u8 leds_front;
92
93static void ot200_led_brightness_set(struct led_classdev *led_cdev,
94 enum led_brightness value)
95{
96 struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev);
97 u8 *val;
98 unsigned long flags;
99
100 spin_lock_irqsave(&value_lock, flags);
101
102 if (led->port == 0x49)
103 val = &leds_front;
104 else if (led->port == 0x5a)
105 val = &leds_back;
106 else
107 BUG();
108
109 if (value == LED_OFF)
110 *val &= ~led->mask;
111 else
112 *val |= led->mask;
113
114 outb(*val, led->port);
115 spin_unlock_irqrestore(&value_lock, flags);
116}
117
118static int __devinit ot200_led_probe(struct platform_device *pdev)
119{
120 int i;
121 int ret;
122
123 for (i = 0; i < ARRAY_SIZE(leds); i++) {
124
125 leds[i].cdev.name = leds[i].name;
126 leds[i].cdev.brightness_set = ot200_led_brightness_set;
127
128 ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
129 if (ret < 0)
130 goto err;
131 }
132
133 leds_front = 0; /* turn off all front leds */
134 leds_back = BIT(1); /* turn on init led */
135 outb(leds_front, 0x49);
136 outb(leds_back, 0x5a);
137
138 return 0;
139
140err:
141 for (i = i - 1; i >= 0; i--)
142 led_classdev_unregister(&leds[i].cdev);
143
144 return ret;
145}
146
147static int __devexit ot200_led_remove(struct platform_device *pdev)
148{
149 int i;
150
151 for (i = 0; i < ARRAY_SIZE(leds); i++)
152 led_classdev_unregister(&leds[i].cdev);
153
154 return 0;
155}
156
157static struct platform_driver ot200_led_driver = {
158 .probe = ot200_led_probe,
159 .remove = __devexit_p(ot200_led_remove),
160 .driver = {
161 .name = "leds-ot200",
162 .owner = THIS_MODULE,
163 },
164};
165
166module_platform_driver(ot200_led_driver);
167
168MODULE_AUTHOR("Sebastian A. Siewior <bigeasy@linutronix.de>");
169MODULE_DESCRIPTION("ot200 LED driver");
170MODULE_LICENSE("GPL");
171MODULE_ALIAS("platform:leds-ot200");
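Note: the driver above keeps shadow bytes (leds_front/leds_back) because the OT200 LED ports are write-only; each brightness change is a read-modify-write on the cached value under a spinlock, followed by a single outb(). A stripped-down sketch of that pattern, with illustrative names (assumes <linux/spinlock.h>, <linux/io.h> and <linux/types.h>):

/* Illustrative shadow-register update for a write-only I/O port. */
static u8 port_shadow;			/* last value written to the port */
static DEFINE_SPINLOCK(port_shadow_lock);

static void set_port_bit_example(unsigned long port, u8 mask, bool on)
{
	unsigned long flags;

	spin_lock_irqsave(&port_shadow_lock, flags);
	if (on)
		port_shadow |= mask;
	else
		port_shadow &= ~mask;
	outb(port_shadow, port);	/* hardware now matches the cache */
	spin_unlock_irqrestore(&port_shadow_lock, flags);
}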
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 27555995f7e4..b5ee3ebfcfca 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -24,6 +24,21 @@
24#include <linux/dvb/frontend.h> 24#include <linux/dvb/frontend.h>
25#include "dvb_frontend.h" 25#include "dvb_frontend.h"
26 26
27/* Registers (Write-only) */
28#define XREG_INIT 0x00
29#define XREG_RF_FREQ 0x02
30#define XREG_POWER_DOWN 0x08
31
32/* Registers (Read-only) */
33#define XREG_FREQ_ERROR 0x01
34#define XREG_LOCK 0x02
35#define XREG_VERSION 0x04
36#define XREG_PRODUCT_ID 0x08
37#define XREG_HSYNC_FREQ 0x10
38#define XREG_FRAME_LINES 0x20
39#define XREG_SNR 0x40
40
41#define XREG_ADC_ENV 0x0100
27 42
28static int debug; 43static int debug;
29module_param(debug, int, 0644); 44module_param(debug, int, 0644);
@@ -885,7 +900,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
885 mutex_lock(&priv->lock); 900 mutex_lock(&priv->lock);
886 901
887 /* Sync Lock Indicator */ 902 /* Sync Lock Indicator */
888 rc = xc2028_get_reg(priv, 0x0002, &frq_lock); 903 rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
889 if (rc < 0) 904 if (rc < 0)
890 goto ret; 905 goto ret;
891 906
@@ -894,7 +909,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
894 signal = 1 << 11; 909 signal = 1 << 11;
895 910
896 /* Get SNR of the video signal */ 911 /* Get SNR of the video signal */
897 rc = xc2028_get_reg(priv, 0x0040, &signal); 912 rc = xc2028_get_reg(priv, XREG_SNR, &signal);
898 if (rc < 0) 913 if (rc < 0)
899 goto ret; 914 goto ret;
900 915
@@ -1019,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
1019 1034
1020 /* CMD= Set frequency */ 1035 /* CMD= Set frequency */
1021 if (priv->firm_version < 0x0202) 1036 if (priv->firm_version < 0x0202)
1022 rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00}); 1037 rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
1023 else 1038 else
1024 rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00}); 1039 rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
1025 if (rc < 0) 1040 if (rc < 0)
1026 goto ret; 1041 goto ret;
1027 1042
@@ -1201,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
1201 mutex_lock(&priv->lock); 1216 mutex_lock(&priv->lock);
1202 1217
1203 if (priv->firm_version < 0x0202) 1218 if (priv->firm_version < 0x0202)
1204 rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00}); 1219 rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
1205 else 1220 else
1206 rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00}); 1221 rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
1207 1222
1208 priv->cur_fw.type = 0; /* need firmware reload */ 1223 priv->cur_fw.type = 0; /* need firmware reload */
1209 1224
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
index d218c1d68c33..68397110b7d9 100644
--- a/drivers/media/common/tuners/xc4000.c
+++ b/drivers/media/common/tuners/xc4000.c
@@ -154,6 +154,8 @@ struct xc4000_priv {
154#define XREG_SNR 0x06 154#define XREG_SNR 0x06
155#define XREG_VERSION 0x07 155#define XREG_VERSION 0x07
156#define XREG_PRODUCT_ID 0x08 156#define XREG_PRODUCT_ID 0x08
157#define XREG_SIGNAL_LEVEL 0x0A
158#define XREG_NOISE_LEVEL 0x0B
157 159
158/* 160/*
159 Basic firmware description. This will remain with 161 Basic firmware description. This will remain with
@@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
486 return xc4000_readreg(priv, XREG_QUALITY, quality); 488 return xc4000_readreg(priv, XREG_QUALITY, quality);
487} 489}
488 490
491static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
492{
493 return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
494}
495
496static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
497{
498 return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
499}
500
489static u16 xc_wait_for_lock(struct xc4000_priv *priv) 501static u16 xc_wait_for_lock(struct xc4000_priv *priv)
490{ 502{
491 u16 lock_state = 0; 503 u16 lock_state = 0;
@@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv)
1089 u32 hsync_freq_hz = 0; 1101 u32 hsync_freq_hz = 0;
1090 u16 frame_lines; 1102 u16 frame_lines;
1091 u16 quality; 1103 u16 quality;
1104 u16 signal = 0;
1105 u16 noise = 0;
1092 u8 hw_majorversion = 0, hw_minorversion = 0; 1106 u8 hw_majorversion = 0, hw_minorversion = 0;
1093 u8 fw_majorversion = 0, fw_minorversion = 0; 1107 u8 fw_majorversion = 0, fw_minorversion = 0;
1094 1108
@@ -1119,6 +1133,12 @@ static void xc_debug_dump(struct xc4000_priv *priv)
1119 1133
1120 xc_get_quality(priv, &quality); 1134 xc_get_quality(priv, &quality);
1121 dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality); 1135 dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
1136
1137 xc_get_signal_level(priv, &signal);
1138 dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
1139
1140 xc_get_noise_level(priv, &noise);
1141 dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
1122} 1142}
1123 1143
1124static int xc4000_set_params(struct dvb_frontend *fe) 1144static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1432,6 +1452,71 @@ fail:
1432 return ret; 1452 return ret;
1433} 1453}
1434 1454
1455static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
1456{
1457 struct xc4000_priv *priv = fe->tuner_priv;
1458 u16 value = 0;
1459 int rc;
1460
1461 mutex_lock(&priv->lock);
1462 rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
1463 mutex_unlock(&priv->lock);
1464
1465 if (rc < 0)
1466 goto ret;
1467
1468 /* Information from real testing of the DVB-T and radio parts:
1469 the coefficient for one dB is 0xff.
1470 */
1471 tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
1472
1473 /* all known digital modes */
1474 if ((priv->video_standard == XC4000_DTV6) ||
1475 (priv->video_standard == XC4000_DTV7) ||
1476 (priv->video_standard == XC4000_DTV7_8) ||
1477 (priv->video_standard == XC4000_DTV8))
1478 goto digital;
1479
1480 /* In analog mode the NOISE LEVEL is what matters; the signal
1481 level depends only on the gain of the antenna and amplifiers,
1482 so it says nothing about the real quality of
1483 reception.
1484 */
1485 mutex_lock(&priv->lock);
1486 rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
1487 mutex_unlock(&priv->lock);
1488
1489 tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
1490
1491 /* highest noise level: 32dB */
1492 if (value >= 0x2000) {
1493 value = 0;
1494 } else {
1495 value = ~value << 3;
1496 }
1497
1498 goto ret;
1499
1500 /* In digital mode the SIGNAL LEVEL is what matters; the real
1501 noise level is stored in the demodulator registers.
1502 */
1503digital:
1504 /* best signal: -50dB */
1505 if (value <= 0x3200) {
1506 value = 0xffff;
1507 /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
1508 } else if (value >= 0x713A) {
1509 value = 0;
1510 } else {
1511 value = ~(value - 0x3200) << 2;
1512 }
1513
1514ret:
1515 *strength = value;
1516
1517 return rc;
1518}
1519
1435static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) 1520static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
1436{ 1521{
1437 struct xc4000_priv *priv = fe->tuner_priv; 1522 struct xc4000_priv *priv = fe->tuner_priv;
@@ -1559,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
1559 .set_params = xc4000_set_params, 1644 .set_params = xc4000_set_params,
1560 .set_analog_params = xc4000_set_analog_params, 1645 .set_analog_params = xc4000_set_analog_params,
1561 .get_frequency = xc4000_get_frequency, 1646 .get_frequency = xc4000_get_frequency,
1647 .get_rf_strength = xc4000_get_signal,
1562 .get_bandwidth = xc4000_get_bandwidth, 1648 .get_bandwidth = xc4000_get_bandwidth,
1563 .get_status = xc4000_get_status 1649 .get_status = xc4000_get_status
1564}; 1650};
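Note: in xc4000_get_signal() above, the raw XREG_SIGNAL_LEVEL value encodes attenuation in steps of roughly 1/256 dB (0xff per dB, per the comment). For the digital standards, anything at -50 dB (0x3200) or better reports full scale, anything past about -114 dB (0x713A) reports zero, and values in between are inverted and shifted left by two. A hedged restatement of that mapping, for illustration only (assumes <linux/types.h> for u16):

/* Illustrative restatement of the digital-mode scaling used above.
 * 'raw' is the XREG_SIGNAL_LEVEL reading: attenuation in 1/256 dB steps. */
static u16 xc4000_scale_digital_example(unsigned int raw)
{
	if (raw <= 0x3200)	/* -50 dB or better: best signal */
		return 0xffff;
	if (raw >= 0x713A)	/* about -114 dB: no usable signal */
		return 0;
	return (u16)(~(raw - 0x3200) << 2);
}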
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index b15db4fe347b..fbbe545a74cb 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,8 +904,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
904{ 904{
905 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 905 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
906 int i; 906 int i;
907 u32 delsys;
907 908
909 delsys = c->delivery_system;
908 memset(c, 0, sizeof(struct dtv_frontend_properties)); 910 memset(c, 0, sizeof(struct dtv_frontend_properties));
911 c->delivery_system = delsys;
909 912
910 c->state = DTV_CLEAR; 913 c->state = DTV_CLEAR;
911 914
@@ -1009,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
1009 _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0), 1012 _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
1010 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0), 1013 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
1011 1014
1012 _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
1013 _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
1014 _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
1015 _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
1016 _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
1017 _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
1018 _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
1019 _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
1020 _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
1021 _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
1022 _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
1023 _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
1024 _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
1025 _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
1026 _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
1027 _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
1028 _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
1029 _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
1030
1031 _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0), 1015 _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
1032 _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0), 1016 _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
1033 1017
@@ -1413,6 +1397,15 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
1413 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 1397 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1414 enum dvbv3_emulation_type type; 1398 enum dvbv3_emulation_type type;
1415 1399
1400 /*
1401 * It was reported that some old DVBv5 applications were
1402 * filling delivery_system with SYS_UNDEFINED. If this happens,
1403 * assume that the application wants to use the first supported
1404 * delivery system.
1405 */
1406 if (c->delivery_system == SYS_UNDEFINED)
1407 c->delivery_system = fe->ops.delsys[0];
1408
1416 if (desired_system == SYS_UNDEFINED) { 1409 if (desired_system == SYS_UNDEFINED) {
1417 /* 1410 /*
1418 * A DVBv3 call doesn't know what's the desired system. 1411 * A DVBv3 call doesn't know what's the desired system.
@@ -1732,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
1732{ 1725{
1733 struct dvb_device *dvbdev = file->private_data; 1726 struct dvb_device *dvbdev = file->private_data;
1734 struct dvb_frontend *fe = dvbdev->priv; 1727 struct dvb_frontend *fe = dvbdev->priv;
1728 struct dvb_frontend_private *fepriv = fe->frontend_priv;
1735 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 1729 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
1736 int err = 0; 1730 int err = 0;
1737 1731
@@ -1798,9 +1792,14 @@ static int dvb_frontend_ioctl_properties(struct file *file,
1798 1792
1799 /* 1793 /*
1800 * Fills the cache out struct with the cache contents, plus 1794 * Fills the cache out struct with the cache contents, plus
1801 * the data retrieved from get_frontend. 1795 * the data retrieved from get_frontend, if the frontend
1796 * is not idle. Otherwise, returns the cached content
1802 */ 1797 */
1803 dtv_get_frontend(fe, NULL); 1798 if (fepriv->state != FESTATE_IDLE) {
1799 err = dtv_get_frontend(fe, NULL);
1800 if (err < 0)
1801 goto out;
1802 }
1804 for (i = 0; i < tvps->num; i++) { 1803 for (i = 0; i < tvps->num; i++) {
1805 err = dtv_property_process_get(fe, c, tvp + i, file); 1804 err = dtv_property_process_get(fe, c, tvp + i, file);
1806 if (err < 0) 1805 if (err < 0)
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index d66192974d68..1455e2644ab5 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -877,24 +877,18 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
877 case ANYSEE_HW_508T2C: /* 20 */ 877 case ANYSEE_HW_508T2C: /* 20 */
878 /* E7 T2C */ 878 /* E7 T2C */
879 879
880 if (state->fe_id)
881 break;
882
880 /* enable DVB-T/T2/C demod on IOE[5] */ 883 /* enable DVB-T/T2/C demod on IOE[5] */
881 ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20); 884 ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
882 if (ret) 885 if (ret)
883 goto error; 886 goto error;
884 887
885 if (state->fe_id == 0) { 888 /* attach demod */
886 /* DVB-T/T2 */ 889 adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
887 adap->fe_adap[state->fe_id].fe = 890 &anysee_cxd2820r_config, &adap->dev->i2c_adap,
888 dvb_attach(cxd2820r_attach, 891 NULL);
889 &anysee_cxd2820r_config,
890 &adap->dev->i2c_adap, NULL);
891 } else {
892 /* DVB-C */
893 adap->fe_adap[state->fe_id].fe =
894 dvb_attach(cxd2820r_attach,
895 &anysee_cxd2820r_config,
896 &adap->dev->i2c_adap, adap->fe_adap[0].fe);
897 }
898 892
899 state->has_ci = true; 893 state->has_ci = true;
900 894
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 9bd6d51b3b93..7de125c0b36f 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -48,6 +48,8 @@ struct dib0700_state {
48 u8 disable_streaming_master_mode; 48 u8 disable_streaming_master_mode;
49 u32 fw_version; 49 u32 fw_version;
50 u32 nb_packet_buffer_size; 50 u32 nb_packet_buffer_size;
51 int (*read_status)(struct dvb_frontend *, fe_status_t *);
52 int (*sleep)(struct dvb_frontend* fe);
51 u8 buf[255]; 53 u8 buf[255];
52}; 54};
53 55
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 206999476f02..070e82aa53f5 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = {
834 834
835module_usb_driver(dib0700_driver); 835module_usb_driver(dib0700_driver);
836 836
837MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
837MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); 838MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
838MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge"); 839MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
839MODULE_VERSION("1.0"); 840MODULE_VERSION("1.0");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 81ef4b46f790..f9e966aa26e7 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3066,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
3066 } 3066 }
3067}; 3067};
3068 3068
3069static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) 3069static void stk7070pd_init(struct dvb_usb_device *dev)
3070{ 3070{
3071 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); 3071 dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
3072 msleep(10); 3072 msleep(10);
3073 dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); 3073 dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
3074 dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); 3074 dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
3075 dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); 3075 dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
3076 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); 3076 dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
3077 3077
3078 dib0700_ctrl_clock(adap->dev, 72, 1); 3078 dib0700_ctrl_clock(dev, 72, 1);
3079 3079
3080 msleep(10); 3080 msleep(10);
3081 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); 3081 dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
3082}
3083
3084static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
3085{
3086 stk7070pd_init(adap->dev);
3087
3082 msleep(10); 3088 msleep(10);
3083 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); 3089 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
3084 3090
@@ -3099,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
3099 return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; 3105 return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
3100} 3106}
3101 3107
3108static int novatd_read_status_override(struct dvb_frontend *fe,
3109 fe_status_t *stat)
3110{
3111 struct dvb_usb_adapter *adap = fe->dvb->priv;
3112 struct dvb_usb_device *dev = adap->dev;
3113 struct dib0700_state *state = dev->priv;
3114 int ret;
3115
3116 ret = state->read_status(fe, stat);
3117
3118 if (!ret)
3119 dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
3120 !!(*stat & FE_HAS_LOCK));
3121
3122 return ret;
3123}
3124
3125static int novatd_sleep_override(struct dvb_frontend* fe)
3126{
3127 struct dvb_usb_adapter *adap = fe->dvb->priv;
3128 struct dvb_usb_device *dev = adap->dev;
3129 struct dib0700_state *state = dev->priv;
3130
3131 /* turn off LED */
3132 dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
3133
3134 return state->sleep(fe);
3135}
3136
3137/**
3138 * novatd_frontend_attach - Nova-TD specific attach
3139 *
3140 * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
3141 * information purposes.
3142 */
3143static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
3144{
3145 struct dvb_usb_device *dev = adap->dev;
3146 struct dib0700_state *st = dev->priv;
3147
3148 if (adap->id == 0) {
3149 stk7070pd_init(dev);
3150
3151 /* turn the power LED on, the other two off (just in case) */
3152 dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
3153 dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
3154 dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
3155
3156 if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
3157 stk7070pd_dib7000p_config) != 0) {
3158 err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
3159 __func__);
3160 return -ENODEV;
3161 }
3162 }
3163
3164 adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
3165 adap->id == 0 ? 0x80 : 0x82,
3166 &stk7070pd_dib7000p_config[adap->id]);
3167
3168 if (adap->fe_adap[0].fe == NULL)
3169 return -ENODEV;
3170
3171 st->read_status = adap->fe_adap[0].fe->ops.read_status;
3172 adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
3173 st->sleep = adap->fe_adap[0].fe->ops.sleep;
3174 adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
3175
3176 return 0;
3177}
3178
3102/* S5H1411 */ 3179/* S5H1411 */
3103static struct s5h1411_config pinnacle_801e_config = { 3180static struct s5h1411_config pinnacle_801e_config = {
3104 .output_mode = S5H1411_PARALLEL_OUTPUT, 3181 .output_mode = S5H1411_PARALLEL_OUTPUT,
@@ -3870,6 +3947,57 @@ struct dvb_usb_device_properties dib0700_devices[] = {
3870 .pid_filter_count = 32, 3947 .pid_filter_count = 32,
3871 .pid_filter = stk70x0p_pid_filter, 3948 .pid_filter = stk70x0p_pid_filter,
3872 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, 3949 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
3950 .frontend_attach = novatd_frontend_attach,
3951 .tuner_attach = dib7070p_tuner_attach,
3952
3953 DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
3954 }},
3955 .size_of_priv = sizeof(struct dib0700_adapter_state),
3956 }, {
3957 .num_frontends = 1,
3958 .fe = {{
3959 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
3960 .pid_filter_count = 32,
3961 .pid_filter = stk70x0p_pid_filter,
3962 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
3963 .frontend_attach = novatd_frontend_attach,
3964 .tuner_attach = dib7070p_tuner_attach,
3965
3966 DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
3967 }},
3968 .size_of_priv = sizeof(struct dib0700_adapter_state),
3969 }
3970 },
3971
3972 .num_device_descs = 1,
3973 .devices = {
3974 { "Hauppauge Nova-TD Stick (52009)",
3975 { &dib0700_usb_id_table[35], NULL },
3976 { NULL },
3977 },
3978 },
3979
3980 .rc.core = {
3981 .rc_interval = DEFAULT_RC_INTERVAL,
3982 .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
3983 .module_name = "dib0700",
3984 .rc_query = dib0700_rc_query_old_firmware,
3985 .allowed_protos = RC_TYPE_RC5 |
3986 RC_TYPE_RC6 |
3987 RC_TYPE_NEC,
3988 .change_protocol = dib0700_change_protocol,
3989 },
3990 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
3991
3992 .num_adapters = 2,
3993 .adapter = {
3994 {
3995 .num_frontends = 1,
3996 .fe = {{
3997 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
3998 .pid_filter_count = 32,
3999 .pid_filter = stk70x0p_pid_filter,
4000 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
3873 .frontend_attach = stk7070pd_frontend_attach0, 4001 .frontend_attach = stk7070pd_frontend_attach0,
3874 .tuner_attach = dib7070p_tuner_attach, 4002 .tuner_attach = dib7070p_tuner_attach,
3875 4003
@@ -3892,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
3892 } 4020 }
3893 }, 4021 },
3894 4022
3895 .num_device_descs = 6, 4023 .num_device_descs = 5,
3896 .devices = { 4024 .devices = {
3897 { "DiBcom STK7070PD reference design", 4025 { "DiBcom STK7070PD reference design",
3898 { &dib0700_usb_id_table[17], NULL }, 4026 { &dib0700_usb_id_table[17], NULL },
@@ -3902,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
3902 { &dib0700_usb_id_table[18], NULL }, 4030 { &dib0700_usb_id_table[18], NULL },
3903 { NULL }, 4031 { NULL },
3904 }, 4032 },
3905 { "Hauppauge Nova-TD Stick (52009)",
3906 { &dib0700_usb_id_table[35], NULL },
3907 { NULL },
3908 },
3909 { "Hauppauge Nova-TD-500 (84xxx)", 4033 { "Hauppauge Nova-TD-500 (84xxx)",
3910 { &dib0700_usb_id_table[36], NULL }, 4034 { &dib0700_usb_id_table[36], NULL },
3911 { NULL }, 4035 { NULL },
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 93e1b12e7907..caae7f79c837 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -309,9 +309,14 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
309 309
310static int cxd2820r_get_frontend(struct dvb_frontend *fe) 310static int cxd2820r_get_frontend(struct dvb_frontend *fe)
311{ 311{
312 struct cxd2820r_priv *priv = fe->demodulator_priv;
312 int ret; 313 int ret;
313 314
314 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system); 315 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
316
317 if (priv->delivery_system == SYS_UNDEFINED)
318 return 0;
319
315 switch (fe->dtv_property_cache.delivery_system) { 320 switch (fe->dtv_property_cache.delivery_system) {
316 case SYS_DVBT: 321 case SYS_DVBT:
317 ret = cxd2820r_get_frontend_t(fe); 322 ret = cxd2820r_get_frontend_t(fe);
@@ -476,10 +481,10 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
476 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system); 481 dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
477 482
478 /* switch between DVB-T and DVB-T2 when tune fails */ 483 /* switch between DVB-T and DVB-T2 when tune fails */
479 if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) { 484 if (priv->last_tune_failed) {
480 if (priv->delivery_system == SYS_DVBT) 485 if (priv->delivery_system == SYS_DVBT)
481 c->delivery_system = SYS_DVBT2; 486 c->delivery_system = SYS_DVBT2;
482 else 487 else if (priv->delivery_system == SYS_DVBT2)
483 c->delivery_system = SYS_DVBT; 488 c->delivery_system = SYS_DVBT;
484 } 489 }
485 490
@@ -492,6 +497,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
492 /* frontend lock wait loop count */ 497 /* frontend lock wait loop count */
493 switch (priv->delivery_system) { 498 switch (priv->delivery_system) {
494 case SYS_DVBT: 499 case SYS_DVBT:
500 case SYS_DVBC_ANNEX_A:
495 i = 20; 501 i = 20;
496 break; 502 break;
497 case SYS_DVBT2: 503 case SYS_DVBT2:
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index 938777065de6..af65d013db11 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
1195 1195
1196 for (i = 0; i < 30 ; i++) { 1196 for (i = 0; i < 30 ; i++) {
1197 ds3000_read_status(fe, &status); 1197 ds3000_read_status(fe, &status);
1198 if (status && FE_HAS_LOCK) 1198 if (status & FE_HAS_LOCK)
1199 break; 1199 break;
1200 1200
1201 msleep(10); 1201 msleep(10);
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index 7fa3e472cdca..fade566927c3 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
402 [2] = 0x8e, /* Layer C */ 402 [2] = 0x8e, /* Layer C */
403 }; 403 };
404 404
405 if (layer > ARRAY_SIZE(reg)) 405 if (layer >= ARRAY_SIZE(reg))
406 return -EINVAL; 406 return -EINVAL;
407 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 407 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
408 if (rc < 0) 408 if (rc < 0)
@@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
435 [2] = 0x8f, /* Layer C */ 435 [2] = 0x8f, /* Layer C */
436 }; 436 };
437 437
438 if (layer > ARRAY_SIZE(reg)) 438 if (layer >= ARRAY_SIZE(reg))
439 return -EINVAL; 439 return -EINVAL;
440 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 440 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
441 if (rc < 0) 441 if (rc < 0)
@@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
470 [2] = 0x90, /* Layer C */ 470 [2] = 0x90, /* Layer C */
471 }; 471 };
472 472
473 if (layer > ARRAY_SIZE(reg)) 473 if (layer >= ARRAY_SIZE(reg))
474 return -EINVAL; 474 return -EINVAL;
475 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 475 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
476 if (rc < 0) 476 if (rc < 0)
@@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
494 [2] = 0x91, /* Layer C */ 494 [2] = 0x91, /* Layer C */
495 }; 495 };
496 496
497 if (layer > ARRAY_SIZE(reg)) 497 if (layer >= ARRAY_SIZE(reg))
498 return -EINVAL; 498 return -EINVAL;
499 rc = mb86a20s_writereg(state, 0x6d, reg[layer]); 499 rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
500 if (rc < 0) 500 if (rc < 0)
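Note: the mb86a20s changes above fix a classic off-by-one in a bounds check: for an array with N entries the valid indexes are 0..N-1, so the guard must reject layer >= ARRAY_SIZE(reg), not only layer > ARRAY_SIZE(reg). A minimal illustration of the corrected pattern (names are illustrative; assumes <linux/kernel.h> for ARRAY_SIZE and <linux/errno.h> for EINVAL):

/* Illustrative bounds check: valid indexes for regs[] are 0..2 only. */
static const u8 regs[3] = { 0x8d, 0x8e, 0x8f };

static int lookup_layer_reg_example(unsigned int layer, u8 *out)
{
	if (layer >= ARRAY_SIZE(regs))	/* layer == 3 would read past the end */
		return -EINVAL;
	*out = regs[layer];
	return 0;
}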
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
index 86da3d816498..ad7c72e8f517 100644
--- a/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -29,7 +29,6 @@
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/firmware.h> 30#include <linux/firmware.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/version.h>
33#include <asm/div64.h> 32#include <asm/div64.h>
34 33
35#include "dvb_frontend.h" 34#include "dvb_frontend.h"
diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c
index ec859a580651..f241702a0f36 100644
--- a/drivers/media/video/as3645a.c
+++ b/drivers/media/video/as3645a.c
@@ -29,6 +29,7 @@
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h>
32 33
33#include <media/as3645a.h> 34#include <media/as3645a.h>
34#include <media/v4l2-ctrls.h> 35#include <media/v4l2-ctrls.h>
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 14cb961c22bd..4bfd865a4106 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp)
751 751
752 CX18_DEBUG_IOCTL("close() of %s\n", s->name); 752 CX18_DEBUG_IOCTL("close() of %s\n", s->name);
753 753
754 v4l2_fh_del(fh);
755 v4l2_fh_exit(fh);
756
757 /* Easy case first: this stream was never claimed by us */
758 if (s->id != id->open_id) {
759 kfree(id);
760 return 0;
761 }
762
763 /* 'Unclaim' this stream */
764
765 /* Stop radio */
766 mutex_lock(&cx->serialize_lock); 754 mutex_lock(&cx->serialize_lock);
767 if (id->type == CX18_ENC_STREAM_TYPE_RAD) { 755 /* Stop radio */
756 if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
757 v4l2_fh_is_singular_file(filp)) {
768 /* Closing radio device, return to TV mode */ 758 /* Closing radio device, return to TV mode */
769 cx18_mute(cx); 759 cx18_mute(cx);
770 /* Mark that the radio is no longer in use */ 760 /* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp)
781 } 771 }
782 /* Done! Unmute and continue. */ 772 /* Done! Unmute and continue. */
783 cx18_unmute(cx); 773 cx18_unmute(cx);
784 cx18_release_stream(s);
785 } else {
786 cx18_stop_capture(id, 0);
787 } 774 }
775
776 v4l2_fh_del(fh);
777 v4l2_fh_exit(fh);
778
779 /* 'Unclaim' this stream */
780 if (s->id == id->open_id)
781 cx18_stop_capture(id, 0);
788 kfree(id); 782 kfree(id);
789 mutex_unlock(&cx->serialize_lock); 783 mutex_unlock(&cx->serialize_lock);
790 return 0; 784 return 0;
@@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
810 804
811 item->open_id = cx->open_id++; 805 item->open_id = cx->open_id++;
812 filp->private_data = &item->fh; 806 filp->private_data = &item->fh;
807 v4l2_fh_add(&item->fh);
813 808
814 if (item->type == CX18_ENC_STREAM_TYPE_RAD) { 809 if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
815 /* Try to claim this stream */ 810 v4l2_fh_is_singular_file(filp)) {
816 if (cx18_claim_stream(item, item->type)) {
817 /* No, it's already in use */
818 v4l2_fh_exit(&item->fh);
819 kfree(item);
820 return -EBUSY;
821 }
822
823 if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) { 811 if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
824 if (atomic_read(&cx->ana_capturing) > 0) { 812 if (atomic_read(&cx->ana_capturing) > 0) {
825 /* switching to radio while capture is 813 /* switching to radio while capture is
826 in progress is not polite */ 814 in progress is not polite */
827 cx18_release_stream(s); 815 v4l2_fh_del(&item->fh);
828 v4l2_fh_exit(&item->fh); 816 v4l2_fh_exit(&item->fh);
829 kfree(item); 817 kfree(item);
830 return -EBUSY; 818 return -EBUSY;
@@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
842 /* Done! Unmute and continue. */ 830 /* Done! Unmute and continue. */
843 cx18_unmute(cx); 831 cx18_unmute(cx);
844 } 832 }
845 v4l2_fh_add(&item->fh);
846 return 0; 833 return 0;
847} 834}
848 835
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 919ed77b32f2..875a7ce94736 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1052,7 +1052,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1052 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1052 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1053 if (dev == NULL) { 1053 if (dev == NULL) {
1054 cx231xx_err(DRIVER_NAME ": out of memory!\n"); 1054 cx231xx_err(DRIVER_NAME ": out of memory!\n");
1055 clear_bit(dev->devno, &cx231xx_devused); 1055 clear_bit(nr, &cx231xx_devused);
1056 return -ENOMEM; 1056 return -ENOMEM;
1057 } 1057 }
1058 1058
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 3c01be999e35..19b5499d2624 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = {
213 .portc = CX23885_MPEG_DVB, 213 .portc = CX23885_MPEG_DVB,
214 .tuner_type = TUNER_XC4000, 214 .tuner_type = TUNER_XC4000,
215 .tuner_addr = 0x61, 215 .tuner_addr = 0x61,
216 .radio_type = TUNER_XC4000, 216 .radio_type = UNSET,
217 .radio_addr = 0x61, 217 .radio_addr = ADDR_UNSET,
218 .input = {{ 218 .input = {{
219 .type = CX23885_VMUX_TELEVISION, 219 .type = CX23885_VMUX_TELEVISION,
220 .vmux = CX25840_VIN2_CH1 | 220 .vmux = CX25840_VIN2_CH1 |
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index af8a225763d3..6835eb1fc093 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -943,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port)
943 943
944 fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, 944 fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
945 &dev->i2c_bus[1].i2c_adap, &cfg); 945 &dev->i2c_bus[1].i2c_adap, &cfg);
946 if (!fe) {
947 printk(KERN_ERR "%s/2: xc4000 attach failed\n",
948 dev->name);
949 goto frontend_detach;
950 }
946 } 951 }
947 break; 952 break;
948 case CX23885_BOARD_TBS_6920: 953 case CX23885_BOARD_TBS_6920:
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 4bbf9bb97bde..c654bdc7ccb2 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1550,7 +1550,6 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
1550 struct v4l2_control ctrl; 1550 struct v4l2_control ctrl;
1551 struct videobuf_dvb_frontend *vfe; 1551 struct videobuf_dvb_frontend *vfe;
1552 struct dvb_frontend *fe; 1552 struct dvb_frontend *fe;
1553 int err = 0;
1554 1553
1555 struct analog_parameters params = { 1554 struct analog_parameters params = {
1556 .mode = V4L2_TUNER_ANALOG_TV, 1555 .mode = V4L2_TUNER_ANALOG_TV,
@@ -1572,8 +1571,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
1572 params.frequency, f->tuner, params.std); 1571 params.frequency, f->tuner, params.std);
1573 1572
1574 vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1); 1573 vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
1575 if (!vfe) 1574 if (!vfe) {
1576 err = -EINVAL; 1575 mutex_unlock(&dev->lock);
1576 return -EINVAL;
1577 }
1577 1578
1578 fe = vfe->dvb.frontend; 1579 fe = vfe->dvb.frontend;
1579 1580
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 62c7ad050f9b..cbd5d119a2c6 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
1573 .name = "Pinnacle Hybrid PCTV", 1573 .name = "Pinnacle Hybrid PCTV",
1574 .tuner_type = TUNER_XC2028, 1574 .tuner_type = TUNER_XC2028,
1575 .tuner_addr = 0x61, 1575 .tuner_addr = 0x61,
1576 .radio_type = TUNER_XC2028, 1576 .radio_type = UNSET,
1577 .radio_addr = 0x61, 1577 .radio_addr = ADDR_UNSET,
1578 .input = { { 1578 .input = { {
1579 .type = CX88_VMUX_TELEVISION, 1579 .type = CX88_VMUX_TELEVISION,
1580 .vmux = 0, 1580 .vmux = 0,
@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
1611 .name = "Leadtek TV2000 XP Global", 1611 .name = "Leadtek TV2000 XP Global",
1612 .tuner_type = TUNER_XC2028, 1612 .tuner_type = TUNER_XC2028,
1613 .tuner_addr = 0x61, 1613 .tuner_addr = 0x61,
1614 .radio_type = TUNER_XC2028, 1614 .radio_type = UNSET,
1615 .radio_addr = 0x61, 1615 .radio_addr = ADDR_UNSET,
1616 .input = { { 1616 .input = { {
1617 .type = CX88_VMUX_TELEVISION, 1617 .type = CX88_VMUX_TELEVISION,
1618 .vmux = 0, 1618 .vmux = 0,
@@ -2115,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = {
2115 .name = "Terratec Cinergy HT PCI MKII", 2115 .name = "Terratec Cinergy HT PCI MKII",
2116 .tuner_type = TUNER_XC2028, 2116 .tuner_type = TUNER_XC2028,
2117 .tuner_addr = 0x61, 2117 .tuner_addr = 0x61,
2118 .radio_type = TUNER_XC2028, 2118 .radio_type = UNSET,
2119 .radio_addr = 0x61, 2119 .radio_addr = ADDR_UNSET,
2120 .input = { { 2120 .input = { {
2121 .type = CX88_VMUX_TELEVISION, 2121 .type = CX88_VMUX_TELEVISION,
2122 .vmux = 0, 2122 .vmux = 0,
@@ -2154,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = {
2154 [CX88_BOARD_WINFAST_DTV1800H] = { 2154 [CX88_BOARD_WINFAST_DTV1800H] = {
2155 .name = "Leadtek WinFast DTV1800 Hybrid", 2155 .name = "Leadtek WinFast DTV1800 Hybrid",
2156 .tuner_type = TUNER_XC2028, 2156 .tuner_type = TUNER_XC2028,
2157 .radio_type = TUNER_XC2028, 2157 .radio_type = UNSET,
2158 .tuner_addr = 0x61, 2158 .tuner_addr = 0x61,
2159 .radio_addr = 0x61, 2159 .radio_addr = ADDR_UNSET,
2160 /* 2160 /*
2161 * GPIO setting 2161 * GPIO setting
2162 * 2162 *
@@ -2195,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = {
2195 [CX88_BOARD_WINFAST_DTV1800H_XC4000] = { 2195 [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
2196 .name = "Leadtek WinFast DTV1800 H (XC4000)", 2196 .name = "Leadtek WinFast DTV1800 H (XC4000)",
2197 .tuner_type = TUNER_XC4000, 2197 .tuner_type = TUNER_XC4000,
2198 .radio_type = TUNER_XC4000, 2198 .radio_type = UNSET,
2199 .tuner_addr = 0x61, 2199 .tuner_addr = 0x61,
2200 .radio_addr = 0x61, 2200 .radio_addr = ADDR_UNSET,
2201 /* 2201 /*
2202 * GPIO setting 2202 * GPIO setting
2203 * 2203 *
@@ -2236,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = {
2236 [CX88_BOARD_WINFAST_DTV2000H_PLUS] = { 2236 [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
2237 .name = "Leadtek WinFast DTV2000 H PLUS", 2237 .name = "Leadtek WinFast DTV2000 H PLUS",
2238 .tuner_type = TUNER_XC4000, 2238 .tuner_type = TUNER_XC4000,
2239 .radio_type = TUNER_XC4000, 2239 .radio_type = UNSET,
2240 .tuner_addr = 0x61, 2240 .tuner_addr = 0x61,
2241 .radio_addr = 0x61, 2241 .radio_addr = ADDR_UNSET,
2242 /* 2242 /*
2243 * GPIO 2243 * GPIO
2244 * 2: 1: mute audio 2244 * 2: 1: mute audio
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 544af91cbdc1..3949b7dc2368 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
731 731
732 init_kthread_work(&itv->irq_work, ivtv_irq_work_handler); 732 init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
733 733
734 /* start counting open_id at 1 */
735 itv->open_id = 1;
736
737 /* Initial settings */ 734 /* Initial settings */
738 itv->cxhdl.port = CX2341X_PORT_MEMORY; 735 itv->cxhdl.port = CX2341X_PORT_MEMORY;
739 itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI; 736 itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 8f9cc17b518e..06f3d78389bf 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -332,7 +332,7 @@ struct ivtv_stream {
332 const char *name; /* name of the stream */ 332 const char *name; /* name of the stream */
333 int type; /* stream type */ 333 int type; /* stream type */
334 334
335 u32 id; 335 struct v4l2_fh *fh; /* pointer to the streaming filehandle */
336 spinlock_t qlock; /* locks access to the queues */ 336 spinlock_t qlock; /* locks access to the queues */
337 unsigned long s_flags; /* status flags, see above */ 337 unsigned long s_flags; /* status flags, see above */
338 int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */ 338 int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@ struct ivtv_stream {
379 379
380struct ivtv_open_id { 380struct ivtv_open_id {
381 struct v4l2_fh fh; 381 struct v4l2_fh fh;
382 u32 open_id; /* unique ID for this file descriptor */
383 int type; /* stream type */ 382 int type; /* stream type */
384 int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */ 383 int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
385 struct ivtv *itv; 384 struct ivtv *itv;
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 38f052257f46..2cd6c89b7d91 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
50 50
51 if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { 51 if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
52 /* someone already claimed this stream */ 52 /* someone already claimed this stream */
53 if (s->id == id->open_id) { 53 if (s->fh == &id->fh) {
54 /* yes, this file descriptor did. So that's OK. */ 54 /* yes, this file descriptor did. So that's OK. */
55 return 0; 55 return 0;
56 } 56 }
57 if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI || 57 if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
58 type == IVTV_ENC_STREAM_TYPE_VBI)) { 58 type == IVTV_ENC_STREAM_TYPE_VBI)) {
59 /* VBI is handled already internally, now also assign 59 /* VBI is handled already internally, now also assign
60 the file descriptor to this stream for external 60 the file descriptor to this stream for external
61 reading of the stream. */ 61 reading of the stream. */
62 s->id = id->open_id; 62 s->fh = &id->fh;
63 IVTV_DEBUG_INFO("Start Read VBI\n"); 63 IVTV_DEBUG_INFO("Start Read VBI\n");
64 return 0; 64 return 0;
65 } 65 }
@@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
67 IVTV_DEBUG_INFO("Stream %d is busy\n", type); 67 IVTV_DEBUG_INFO("Stream %d is busy\n", type);
68 return -EBUSY; 68 return -EBUSY;
69 } 69 }
70 s->id = id->open_id; 70 s->fh = &id->fh;
71 if (type == IVTV_DEC_STREAM_TYPE_VBI) { 71 if (type == IVTV_DEC_STREAM_TYPE_VBI) {
72 /* Enable reinsertion interrupt */ 72 /* Enable reinsertion interrupt */
73 ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); 73 ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
104 struct ivtv *itv = s->itv; 104 struct ivtv *itv = s->itv;
105 struct ivtv_stream *s_vbi; 105 struct ivtv_stream *s_vbi;
106 106
107 s->id = -1; 107 s->fh = NULL;
108 if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) && 108 if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
109 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { 109 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
110 /* this stream is still in use internally */ 110 /* this stream is still in use internally */
@@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
136 /* was already cleared */ 136 /* was already cleared */
137 return; 137 return;
138 } 138 }
139 if (s_vbi->id != -1) { 139 if (s_vbi->fh) {
140 /* VBI stream still claimed by a file descriptor */ 140 /* VBI stream still claimed by a file descriptor */
141 return; 141 return;
142 } 142 }
@@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
268 } 268 }
269 269
270 /* wait for more data to arrive */ 270 /* wait for more data to arrive */
271 mutex_unlock(&itv->serialize_lock);
271 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); 272 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
272 /* New buffers might have become available before we were added to the waitqueue */ 273 /* New buffers might have become available before we were added to the waitqueue */
273 if (!s->q_full.buffers) 274 if (!s->q_full.buffers)
274 schedule(); 275 schedule();
275 finish_wait(&s->waitq, &wait); 276 finish_wait(&s->waitq, &wait);
277 mutex_lock(&itv->serialize_lock);
276 if (signal_pending(current)) { 278 if (signal_pending(current)) {
277 /* return if a signal was received */ 279 /* return if a signal was received */
278 IVTV_DEBUG_INFO("User stopped %s\n", s->name); 280 IVTV_DEBUG_INFO("User stopped %s\n", s->name);
@@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
357 size_t tot_written = 0; 359 size_t tot_written = 0;
358 int single_frame = 0; 360 int single_frame = 0;
359 361
360 if (atomic_read(&itv->capturing) == 0 && s->id == -1) { 362 if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
361 /* shouldn't happen */ 363 /* shouldn't happen */
362 IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name); 364 IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
363 return -EIO; 365 return -EIO;
@@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
507 509
508 IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name); 510 IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
509 511
510 mutex_lock(&itv->serialize_lock);
511 rc = ivtv_start_capture(id); 512 rc = ivtv_start_capture(id);
512 mutex_unlock(&itv->serialize_lock);
513 if (rc) 513 if (rc)
514 return rc; 514 return rc;
515 return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK); 515 return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
584 set_bit(IVTV_F_S_APPL_IO, &s->s_flags); 584 set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
585 585
586 /* Start decoder (returns 0 if already started) */ 586 /* Start decoder (returns 0 if already started) */
587 mutex_lock(&itv->serialize_lock);
588 rc = ivtv_start_decoding(id, itv->speed); 587 rc = ivtv_start_decoding(id, itv->speed);
589 mutex_unlock(&itv->serialize_lock);
590 if (rc) { 588 if (rc) {
591 IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name); 589 IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
592 590
@@ -627,11 +625,13 @@ retry:
627 break; 625 break;
628 if (filp->f_flags & O_NONBLOCK) 626 if (filp->f_flags & O_NONBLOCK)
629 return -EAGAIN; 627 return -EAGAIN;
628 mutex_unlock(&itv->serialize_lock);
630 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); 629 prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
631 /* New buffers might have become free before we were added to the waitqueue */ 630 /* New buffers might have become free before we were added to the waitqueue */
632 if (!s->q_free.buffers) 631 if (!s->q_free.buffers)
633 schedule(); 632 schedule();
634 finish_wait(&s->waitq, &wait); 633 finish_wait(&s->waitq, &wait);
634 mutex_lock(&itv->serialize_lock);
635 if (signal_pending(current)) { 635 if (signal_pending(current)) {
636 IVTV_DEBUG_INFO("User stopped %s\n", s->name); 636 IVTV_DEBUG_INFO("User stopped %s\n", s->name);
637 return -EINTR; 637 return -EINTR;
@@ -686,12 +686,14 @@ retry:
686 if (mode == OUT_YUV) 686 if (mode == OUT_YUV)
687 ivtv_yuv_setup_stream_frame(itv); 687 ivtv_yuv_setup_stream_frame(itv);
688 688
689 mutex_unlock(&itv->serialize_lock);
689 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); 690 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
690 while (!(got_sig = signal_pending(current)) && 691 while (!(got_sig = signal_pending(current)) &&
691 test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) { 692 test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
692 schedule(); 693 schedule();
693 } 694 }
694 finish_wait(&itv->dma_waitq, &wait); 695 finish_wait(&itv->dma_waitq, &wait);
696 mutex_lock(&itv->serialize_lock);
695 if (got_sig) { 697 if (got_sig) {
696 IVTV_DEBUG_INFO("User interrupted %s\n", s->name); 698 IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
697 return -EINTR; 699 return -EINTR;
@@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
756 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { 758 if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
757 int rc; 759 int rc;
758 760
759 mutex_lock(&itv->serialize_lock);
760 rc = ivtv_start_capture(id); 761 rc = ivtv_start_capture(id);
761 mutex_unlock(&itv->serialize_lock);
762 if (rc) { 762 if (rc) {
763 IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n", 763 IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
764 s->name, rc); 764 s->name, rc);
@@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
808 id->type == IVTV_ENC_STREAM_TYPE_VBI) && 808 id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
809 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { 809 test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
810 /* Also used internally, don't stop capturing */ 810 /* Also used internally, don't stop capturing */
811 s->id = -1; 811 s->fh = NULL;
812 } 812 }
813 else { 813 else {
814 ivtv_stop_v4l2_encode_stream(s, gop_end); 814 ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp)
861 861
862 IVTV_DEBUG_FILE("close %s\n", s->name); 862 IVTV_DEBUG_FILE("close %s\n", s->name);
863 863
864 v4l2_fh_del(fh);
865 v4l2_fh_exit(fh);
866
867 /* Easy case first: this stream was never claimed by us */
868 if (s->id != id->open_id) {
869 kfree(id);
870 return 0;
871 }
872
873 /* 'Unclaim' this stream */
874
875 /* Stop radio */ 864 /* Stop radio */
876 mutex_lock(&itv->serialize_lock); 865 if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
877 if (id->type == IVTV_ENC_STREAM_TYPE_RAD) { 866 v4l2_fh_is_singular_file(filp)) {
878 /* Closing radio device, return to TV mode */ 867 /* Closing radio device, return to TV mode */
879 ivtv_mute(itv); 868 ivtv_mute(itv);
880 /* Mark that the radio is no longer in use */ 869 /* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp)
890 if (atomic_read(&itv->capturing) > 0) { 879 if (atomic_read(&itv->capturing) > 0) {
891 /* Undo video mute */ 880 /* Undo video mute */
892 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, 881 ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
893 v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) | 882 v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
894 (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8)); 883 (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
895 } 884 }
896 /* Done! Unmute and continue. */ 885 /* Done! Unmute and continue. */
897 ivtv_unmute(itv); 886 ivtv_unmute(itv);
898 ivtv_release_stream(s); 887 }
899 } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { 888
889 v4l2_fh_del(fh);
890 v4l2_fh_exit(fh);
891
892 /* Easy case first: this stream was never claimed by us */
893 if (s->fh != &id->fh) {
894 kfree(id);
895 return 0;
896 }
897
898 /* 'Unclaim' this stream */
899
900 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
900 struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT]; 901 struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
901 902
902 ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0); 903 ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp)
911 ivtv_stop_capture(id, 0); 912 ivtv_stop_capture(id, 0);
912 } 913 }
913 kfree(id); 914 kfree(id);
914 mutex_unlock(&itv->serialize_lock);
915 return 0; 915 return 0;
916} 916}
917 917
918static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp) 918int ivtv_v4l2_open(struct file *filp)
919{ 919{
920#ifdef CONFIG_VIDEO_ADV_DEBUG
921 struct video_device *vdev = video_devdata(filp); 920 struct video_device *vdev = video_devdata(filp);
922#endif 921 struct ivtv_stream *s = video_get_drvdata(vdev);
923 struct ivtv *itv = s->itv; 922 struct ivtv *itv = s->itv;
924 struct ivtv_open_id *item; 923 struct ivtv_open_id *item;
925 int res = 0; 924 int res = 0;
926 925
927 IVTV_DEBUG_FILE("open %s\n", s->name); 926 IVTV_DEBUG_FILE("open %s\n", s->name);
928 927
928 if (ivtv_init_on_first_open(itv)) {
929 IVTV_ERR("Failed to initialize on device %s\n",
930 video_device_node_name(vdev));
931 return -ENXIO;
932 }
933
929#ifdef CONFIG_VIDEO_ADV_DEBUG 934#ifdef CONFIG_VIDEO_ADV_DEBUG
930 /* Unless ivtv_fw_debug is set, error out if firmware dead. */ 935 /* Unless ivtv_fw_debug is set, error out if firmware dead. */
931 if (ivtv_fw_debug) { 936 if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
966 return -ENOMEM; 971 return -ENOMEM;
967 } 972 }
968 v4l2_fh_init(&item->fh, s->vdev); 973 v4l2_fh_init(&item->fh, s->vdev);
969 if (res < 0) {
970 v4l2_fh_exit(&item->fh);
971 kfree(item);
972 return res;
973 }
974 item->itv = itv; 974 item->itv = itv;
975 item->type = s->type; 975 item->type = s->type;
976 976
977 item->open_id = itv->open_id++;
978 filp->private_data = &item->fh; 977 filp->private_data = &item->fh;
978 v4l2_fh_add(&item->fh);
979 979
980 if (item->type == IVTV_ENC_STREAM_TYPE_RAD) { 980 if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
981 /* Try to claim this stream */ 981 v4l2_fh_is_singular_file(filp)) {
982 if (ivtv_claim_stream(item, item->type)) {
983 /* No, it's already in use */
984 v4l2_fh_exit(&item->fh);
985 kfree(item);
986 return -EBUSY;
987 }
988
989 if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) { 982 if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
990 if (atomic_read(&itv->capturing) > 0) { 983 if (atomic_read(&itv->capturing) > 0) {
991 /* switching to radio while capture is 984 /* switching to radio while capture is
992 in progress is not polite */ 985 in progress is not polite */
993 ivtv_release_stream(s); 986 v4l2_fh_del(&item->fh);
994 v4l2_fh_exit(&item->fh); 987 v4l2_fh_exit(&item->fh);
995 kfree(item); 988 kfree(item);
996 return -EBUSY; 989 return -EBUSY;
@@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1022 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31); 1015 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
1023 itv->yuv_info.stream_size = 0; 1016 itv->yuv_info.stream_size = 0;
1024 } 1017 }
1025 v4l2_fh_add(&item->fh);
1026 return 0; 1018 return 0;
1027} 1019}
1028 1020
1029int ivtv_v4l2_open(struct file *filp)
1030{
1031 int res;
1032 struct ivtv *itv = NULL;
1033 struct ivtv_stream *s = NULL;
1034 struct video_device *vdev = video_devdata(filp);
1035
1036 s = video_get_drvdata(vdev);
1037 itv = s->itv;
1038
1039 mutex_lock(&itv->serialize_lock);
1040 if (ivtv_init_on_first_open(itv)) {
1041 IVTV_ERR("Failed to initialize on device %s\n",
1042 video_device_node_name(vdev));
1043 mutex_unlock(&itv->serialize_lock);
1044 return -ENXIO;
1045 }
1046 res = ivtv_serialized_open(s, filp);
1047 mutex_unlock(&itv->serialize_lock);
1048 return res;
1049}
1050
1051void ivtv_mute(struct ivtv *itv) 1021void ivtv_mute(struct ivtv *itv)
1052{ 1022{
1053 if (atomic_read(&itv->capturing)) 1023 if (atomic_read(&itv->capturing))
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index ecafa697326e..c4bc48143098 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
179 ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0); 179 ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
180 180
181 /* Wait for any DMA to finish */ 181 /* Wait for any DMA to finish */
182 mutex_unlock(&itv->serialize_lock);
182 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); 183 prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
183 while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) { 184 while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
184 got_sig = signal_pending(current); 185 got_sig = signal_pending(current);
@@ -188,6 +189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
188 schedule(); 189 schedule();
189 } 190 }
190 finish_wait(&itv->dma_waitq, &wait); 191 finish_wait(&itv->dma_waitq, &wait);
192 mutex_lock(&itv->serialize_lock);
191 if (got_sig) 193 if (got_sig)
192 return -EINTR; 194 return -EINTR;
193 195
@@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
1107 * happens within the first 100 lines of the top field. 1109 * happens within the first 100 lines of the top field.
1108 * Make 4 attempts to sync to the decoder before giving up. 1110 * Make 4 attempts to sync to the decoder before giving up.
1109 */ 1111 */
1112 mutex_unlock(&itv->serialize_lock);
1110 for (f = 0; f < 4; f++) { 1113 for (f = 0; f < 4; f++) {
1111 prepare_to_wait(&itv->vsync_waitq, &wait, 1114 prepare_to_wait(&itv->vsync_waitq, &wait,
1112 TASK_UNINTERRUPTIBLE); 1115 TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
1115 schedule_timeout(msecs_to_jiffies(25)); 1118 schedule_timeout(msecs_to_jiffies(25));
1116 } 1119 }
1117 finish_wait(&itv->vsync_waitq, &wait); 1120 finish_wait(&itv->vsync_waitq, &wait);
1121 mutex_lock(&itv->serialize_lock);
1118 1122
1119 if (f == 4) 1123 if (f == 4)
1120 IVTV_WARN("Mode change failed to sync to decoder\n"); 1124 IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
1842 return 0; 1846 return 0;
1843} 1847}
1844 1848
1845static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp, 1849long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1846 unsigned int cmd, unsigned long arg)
1847{ 1850{
1848 struct video_device *vfd = video_devdata(filp); 1851 struct video_device *vfd = video_devdata(filp);
1849 long ret; 1852 long ret;
@@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
1855 return ret; 1858 return ret;
1856} 1859}
1857 1860
1858long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1859{
1860 struct ivtv_open_id *id = fh2id(filp->private_data);
1861 struct ivtv *itv = id->itv;
1862 long res;
1863
1864 /* DQEVENT can block, so this should not run with the serialize lock */
1865 if (cmd == VIDIOC_DQEVENT)
1866 return ivtv_serialized_ioctl(itv, filp, cmd, arg);
1867 mutex_lock(&itv->serialize_lock);
1868 res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
1869 mutex_unlock(&itv->serialize_lock);
1870 return res;
1871}
1872
1873static const struct v4l2_ioctl_ops ivtv_ioctl_ops = { 1861static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
1874 .vidioc_querycap = ivtv_querycap, 1862 .vidioc_querycap = ivtv_querycap,
1875 .vidioc_s_audio = ivtv_s_audio, 1863 .vidioc_s_audio = ivtv_s_audio,
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9c29e964d400..1b3b9578bf47 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s)
288 ivtv_process_vbi_data(itv, buf, 0, s->type); 288 ivtv_process_vbi_data(itv, buf, 0, s->type);
289 s->q_dma.bytesused += buf->bytesused; 289 s->q_dma.bytesused += buf->bytesused;
290 } 290 }
291 if (s->id == -1) { 291 if (s->fh == NULL) {
292 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); 292 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
293 return; 293 return;
294 } 294 }
295 } 295 }
296 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused); 296 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
297 if (s->id != -1) 297 if (s->fh)
298 wake_up(&s->waitq); 298 wake_up(&s->waitq);
299} 299}
300 300
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index e7794dc1330e..c6e28b4ebbed 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
159 s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size; 159 s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
160 spin_lock_init(&s->qlock); 160 spin_lock_init(&s->qlock);
161 init_waitqueue_head(&s->waitq); 161 init_waitqueue_head(&s->waitq);
162 s->id = -1;
163 s->sg_handle = IVTV_DMA_UNMAPPED; 162 s->sg_handle = IVTV_DMA_UNMAPPED;
164 ivtv_queue_init(&s->q_free); 163 ivtv_queue_init(&s->q_free);
165 ivtv_queue_init(&s->q_full); 164 ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
214 s->vdev->fops = ivtv_stream_info[type].fops; 213 s->vdev->fops = ivtv_stream_info[type].fops;
215 s->vdev->release = video_device_release; 214 s->vdev->release = video_device_release;
216 s->vdev->tvnorms = V4L2_STD_ALL; 215 s->vdev->tvnorms = V4L2_STD_ALL;
216 s->vdev->lock = &itv->serialize_lock;
217 set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags); 217 set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
218 ivtv_set_funcs(s->vdev); 218 ivtv_set_funcs(s->vdev);
219 return 0; 219 return 0;
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index dcbab6ad4c26..2ad65eb29832 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
1149{ 1149{
1150 struct yuv_playback_info *yi = &itv->yuv_info; 1150 struct yuv_playback_info *yi = &itv->yuv_info;
1151 struct ivtv_dma_frame dma_args; 1151 struct ivtv_dma_frame dma_args;
1152 int res;
1152 1153
1153 ivtv_yuv_setup_stream_frame(itv); 1154 ivtv_yuv_setup_stream_frame(itv);
1154 1155
1155 /* We only need to supply source addresses for this */ 1156 /* We only need to supply source addresses for this */
1156 dma_args.y_source = src; 1157 dma_args.y_source = src;
1157 dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31); 1158 dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
1158 return ivtv_yuv_udma_frame(itv, &dma_args); 1159 /* Wait for frame DMA. Note that serialize_lock is locked,
1160 so to allow other processes to access the driver while
1161 we are waiting unlock first and later lock again. */
1162 mutex_unlock(&itv->serialize_lock);
1163 res = ivtv_yuv_udma_frame(itv, &dma_args);
1164 mutex_lock(&itv->serialize_lock);
1165 return res;
1159} 1166}
1160 1167
1161/* IVTV_IOC_DMA_FRAME ioctl handler */ 1168/* IVTV_IOC_DMA_FRAME ioctl handler */
1162int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args) 1169int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
1163{ 1170{
1164/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */ 1171 int res;
1165 1172
1173/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
1166 ivtv_yuv_next_free(itv); 1174 ivtv_yuv_next_free(itv);
1167 ivtv_yuv_setup_frame(itv, args); 1175 ivtv_yuv_setup_frame(itv, args);
1168 return ivtv_yuv_udma_frame(itv, args); 1176 /* Wait for frame DMA. Note that serialize_lock is locked,
1177 so to allow other processes to access the driver while
1178 we are waiting unlock first and later lock again. */
1179 mutex_unlock(&itv->serialize_lock);
1180 res = ivtv_yuv_udma_frame(itv, args);
1181 mutex_lock(&itv->serialize_lock);
1182 return res;
1169} 1183}
1170 1184
1171void ivtv_yuv_close(struct ivtv *itv) 1185void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv)
1174 int h_filter, v_filter_1, v_filter_2; 1188 int h_filter, v_filter_1, v_filter_2;
1175 1189
1176 IVTV_DEBUG_YUV("ivtv_yuv_close\n"); 1190 IVTV_DEBUG_YUV("ivtv_yuv_close\n");
1191 mutex_unlock(&itv->serialize_lock);
1177 ivtv_waitq(&itv->vsync_waitq); 1192 ivtv_waitq(&itv->vsync_waitq);
1193 mutex_lock(&itv->serialize_lock);
1178 1194
1179 yi->running = 0; 1195 yi->running = 0;
1180 atomic_set(&yi->next_dma_frame, -1); 1196 atomic_set(&yi->next_dma_frame, -1);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index a277f95091ef..1fb7d5bd5ec2 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1042,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
1042 strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver)); 1042 strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
1043 strlcpy(cap->card, vout->vfd->name, sizeof(cap->card)); 1043 strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
1044 cap->bus_info[0] = '\0'; 1044 cap->bus_info[0] = '\0';
1045 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT; 1045 cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
1046 V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
1046 1047
1047 return 0; 1048 return 0;
1048} 1049}
@@ -1825,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
1825 ovid = &vout->vid_info; 1826 ovid = &vout->vid_info;
1826 ovl = ovid->overlays[0]; 1827 ovl = ovid->overlays[0];
1827 1828
1828 a->flags = 0x0; 1829 /* The video overlay must stay within the framebuffer and can't be
1830 positioned independently. */
1831 a->flags = V4L2_FBUF_FLAG_OVERLAY;
1829 a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY 1832 a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
1830 | V4L2_FBUF_CAP_SRC_CHROMAKEY; 1833 | V4L2_FBUF_CAP_SRC_CHROMAKEY;
1831 1834
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 905d41d90c6a..1f506fde97d0 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -104,47 +104,16 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
104 104
105/****************************************************************************/ 105/****************************************************************************/
106 106
107static int _send_control_msg(struct pwc_device *pdev,
108 u8 request, u16 value, int index, void *buf, int buflen)
109{
110 int rc;
111 void *kbuf = NULL;
112
113 if (buflen) {
114 kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */
115 if (kbuf == NULL)
116 return -ENOMEM;
117 }
118
119 rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
120 request,
121 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
122 value,
123 index,
124 kbuf, buflen, USB_CTRL_SET_TIMEOUT);
125
126 kfree(kbuf);
127 return rc;
128}
129
130static int recv_control_msg(struct pwc_device *pdev, 107static int recv_control_msg(struct pwc_device *pdev,
131 u8 request, u16 value, void *buf, int buflen) 108 u8 request, u16 value, int recv_count)
132{ 109{
133 int rc; 110 int rc;
134 void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
135
136 if (kbuf == NULL)
137 return -ENOMEM;
138 111
139 rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0), 112 rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
140 request, 113 request,
141 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 114 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
142 value, 115 value, pdev->vcinterface,
143 pdev->vcinterface, 116 pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
144 kbuf, buflen, USB_CTRL_GET_TIMEOUT);
145 memcpy(buf, kbuf, buflen);
146 kfree(kbuf);
147
148 if (rc < 0) 117 if (rc < 0)
149 PWC_ERROR("recv_control_msg error %d req %02x val %04x\n", 118 PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
150 rc, request, value); 119 rc, request, value);
@@ -152,27 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev,
152} 121}
153 122
154static inline int send_video_command(struct pwc_device *pdev, 123static inline int send_video_command(struct pwc_device *pdev,
155 int index, void *buf, int buflen) 124 int index, const unsigned char *buf, int buflen)
156{ 125{
157 return _send_control_msg(pdev, 126 int rc;
158 SET_EP_STREAM_CTL, 127
159 VIDEO_OUTPUT_CONTROL_FORMATTER, 128 memcpy(pdev->ctrl_buf, buf, buflen);
160 index, 129
161 buf, buflen); 130 rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
131 SET_EP_STREAM_CTL,
132 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
133 VIDEO_OUTPUT_CONTROL_FORMATTER, index,
134 pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
135 if (rc >= 0)
136 memcpy(pdev->cmd_buf, buf, buflen);
137 else
138 PWC_ERROR("send_video_command error %d\n", rc);
139
140 return rc;
162} 141}
163 142
164int send_control_msg(struct pwc_device *pdev, 143int send_control_msg(struct pwc_device *pdev,
165 u8 request, u16 value, void *buf, int buflen) 144 u8 request, u16 value, void *buf, int buflen)
166{ 145{
167 return _send_control_msg(pdev, 146 return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
168 request, value, pdev->vcinterface, buf, buflen); 147 request,
148 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
149 value, pdev->vcinterface,
150 buf, buflen, USB_CTRL_SET_TIMEOUT);
169} 151}
170 152
171static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames, 153static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
172 int *compression) 154 int frames, int *compression, int send_to_cam)
173{ 155{
174 unsigned char buf[3]; 156 int fps, ret = 0;
175 int ret, fps;
176 struct Nala_table_entry *pEntry; 157 struct Nala_table_entry *pEntry;
177 int frames2frames[31] = 158 int frames2frames[31] =
178 { /* closest match of framerate */ 159 { /* closest match of framerate */
@@ -194,30 +175,29 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
194 7 /* 30 */ 175 7 /* 30 */
195 }; 176 };
196 177
197 if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25) 178 if (size < 0 || size > PSZ_CIF)
198 return -EINVAL; 179 return -EINVAL;
180 if (frames < 4)
181 frames = 4;
182 else if (frames > 25)
183 frames = 25;
199 frames = frames2frames[frames]; 184 frames = frames2frames[frames];
200 fps = frames2table[frames]; 185 fps = frames2table[frames];
201 pEntry = &Nala_table[size][fps]; 186 pEntry = &Nala_table[size][fps];
202 if (pEntry->alternate == 0) 187 if (pEntry->alternate == 0)
203 return -EINVAL; 188 return -EINVAL;
204 189
205 memcpy(buf, pEntry->mode, 3); 190 if (send_to_cam)
206 ret = send_video_command(pdev, pdev->vendpoint, buf, 3); 191 ret = send_video_command(pdev, pdev->vendpoint,
207 if (ret < 0) { 192 pEntry->mode, 3);
208 PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret); 193 if (ret < 0)
209 return ret; 194 return ret;
210 }
211 if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
212 ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
213 if (ret < 0)
214 return ret;
215 }
216 195
217 pdev->cmd_len = 3; 196 if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
218 memcpy(pdev->cmd_buf, buf, 3); 197 pwc_dec1_init(pdev, pEntry->mode);
219 198
220 /* Set various parameters */ 199 /* Set various parameters */
200 pdev->pixfmt = pixfmt;
221 pdev->vframes = frames; 201 pdev->vframes = frames;
222 pdev->valternate = pEntry->alternate; 202 pdev->valternate = pEntry->alternate;
223 pdev->width = pwc_image_sizes[size][0]; 203 pdev->width = pwc_image_sizes[size][0];
@@ -243,18 +223,20 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
243} 223}
244 224
245 225
246static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, 226static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
247 int *compression) 227 int frames, int *compression, int send_to_cam)
248{ 228{
249 unsigned char buf[13];
250 const struct Timon_table_entry *pChoose; 229 const struct Timon_table_entry *pChoose;
251 int ret, fps; 230 int fps, ret = 0;
252 231
253 if (size >= PSZ_MAX || frames < 5 || frames > 30 || 232 if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
254 *compression < 0 || *compression > 3)
255 return -EINVAL;
256 if (size == PSZ_VGA && frames > 15)
257 return -EINVAL; 233 return -EINVAL;
234 if (frames < 5)
235 frames = 5;
236 else if (size == PSZ_VGA && frames > 15)
237 frames = 15;
238 else if (frames > 30)
239 frames = 30;
258 fps = (frames / 5) - 1; 240 fps = (frames / 5) - 1;
259 241
260 /* Find a supported framerate with progressively higher compression */ 242 /* Find a supported framerate with progressively higher compression */
@@ -268,22 +250,18 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
268 if (pChoose == NULL || pChoose->alternate == 0) 250 if (pChoose == NULL || pChoose->alternate == 0)
269 return -ENOENT; /* Not supported. */ 251 return -ENOENT; /* Not supported. */
270 252
271 memcpy(buf, pChoose->mode, 13); 253 if (send_to_cam)
272 ret = send_video_command(pdev, pdev->vendpoint, buf, 13); 254 ret = send_video_command(pdev, pdev->vendpoint,
255 pChoose->mode, 13);
273 if (ret < 0) 256 if (ret < 0)
274 return ret; 257 return ret;
275 258
276 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) { 259 if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
277 ret = pwc_dec23_init(pdev, pdev->type, buf); 260 pwc_dec23_init(pdev, pChoose->mode);
278 if (ret < 0)
279 return ret;
280 }
281
282 pdev->cmd_len = 13;
283 memcpy(pdev->cmd_buf, buf, 13);
284 261
285 /* Set various parameters */ 262 /* Set various parameters */
286 pdev->vframes = frames; 263 pdev->pixfmt = pixfmt;
264 pdev->vframes = (fps + 1) * 5;
287 pdev->valternate = pChoose->alternate; 265 pdev->valternate = pChoose->alternate;
288 pdev->width = pwc_image_sizes[size][0]; 266 pdev->width = pwc_image_sizes[size][0];
289 pdev->height = pwc_image_sizes[size][1]; 267 pdev->height = pwc_image_sizes[size][1];
@@ -296,18 +274,20 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
296} 274}
297 275
298 276
299static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, 277static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
300 int *compression) 278 int frames, int *compression, int send_to_cam)
301{ 279{
302 const struct Kiara_table_entry *pChoose = NULL; 280 const struct Kiara_table_entry *pChoose = NULL;
303 int fps, ret; 281 int fps, ret = 0;
304 unsigned char buf[12];
305 282
306 if (size >= PSZ_MAX || frames < 5 || frames > 30 || 283 if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
307 *compression < 0 || *compression > 3)
308 return -EINVAL;
309 if (size == PSZ_VGA && frames > 15)
310 return -EINVAL; 284 return -EINVAL;
285 if (frames < 5)
286 frames = 5;
287 else if (size == PSZ_VGA && frames > 15)
288 frames = 15;
289 else if (frames > 30)
290 frames = 30;
311 fps = (frames / 5) - 1; 291 fps = (frames / 5) - 1;
312 292
313 /* Find a supported framerate with progressively higher compression */ 293 /* Find a supported framerate with progressively higher compression */
@@ -320,26 +300,18 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
320 if (pChoose == NULL || pChoose->alternate == 0) 300 if (pChoose == NULL || pChoose->alternate == 0)
321 return -ENOENT; /* Not supported. */ 301 return -ENOENT; /* Not supported. */
322 302
323 PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
324
325 /* usb_control_msg won't take staticly allocated arrays as argument?? */
326 memcpy(buf, pChoose->mode, 12);
327
328 /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */ 303 /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
329 ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12); 304 if (send_to_cam)
305 ret = send_video_command(pdev, 4, pChoose->mode, 12);
330 if (ret < 0) 306 if (ret < 0)
331 return ret; 307 return ret;
332 308
333 if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) { 309 if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
334 ret = pwc_dec23_init(pdev, pdev->type, buf); 310 pwc_dec23_init(pdev, pChoose->mode);
335 if (ret < 0)
336 return ret;
337 }
338 311
339 pdev->cmd_len = 12;
340 memcpy(pdev->cmd_buf, buf, 12);
341 /* All set and go */ 312 /* All set and go */
342 pdev->vframes = frames; 313 pdev->pixfmt = pixfmt;
314 pdev->vframes = (fps + 1) * 5;
343 pdev->valternate = pChoose->alternate; 315 pdev->valternate = pChoose->alternate;
344 pdev->width = pwc_image_sizes[size][0]; 316 pdev->width = pwc_image_sizes[size][0];
345 pdev->height = pwc_image_sizes[size][1]; 317 pdev->height = pwc_image_sizes[size][1];
@@ -354,22 +326,24 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
354} 326}
355 327
356int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, 328int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
357 int frames, int *compression) 329 int pixfmt, int frames, int *compression, int send_to_cam)
358{ 330{
359 int ret, size; 331 int ret, size;
360 332
361 PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt); 333 PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
334 width, height, frames, pixfmt);
362 size = pwc_get_size(pdev, width, height); 335 size = pwc_get_size(pdev, width, height);
363 PWC_TRACE("decode_size = %d.\n", size); 336 PWC_TRACE("decode_size = %d.\n", size);
364 337
365 if (DEVICE_USE_CODEC1(pdev->type)) { 338 if (DEVICE_USE_CODEC1(pdev->type)) {
366 ret = set_video_mode_Nala(pdev, size, frames, compression); 339 ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
367 340 compression, send_to_cam);
368 } else if (DEVICE_USE_CODEC3(pdev->type)) { 341 } else if (DEVICE_USE_CODEC3(pdev->type)) {
369 ret = set_video_mode_Kiara(pdev, size, frames, compression); 342 ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
370 343 compression, send_to_cam);
371 } else { 344 } else {
372 ret = set_video_mode_Timon(pdev, size, frames, compression); 345 ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
346 compression, send_to_cam);
373 } 347 }
374 if (ret < 0) { 348 if (ret < 0) {
375 PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret); 349 PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
@@ -436,13 +410,12 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
436int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) 410int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
437{ 411{
438 int ret; 412 int ret;
439 u8 buf;
440 413
441 ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf)); 414 ret = recv_control_msg(pdev, request, value, 1);
442 if (ret < 0) 415 if (ret < 0)
443 return ret; 416 return ret;
444 417
445 *data = buf; 418 *data = pdev->ctrl_buf[0];
446 return 0; 419 return 0;
447} 420}
448 421
@@ -450,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
450{ 423{
451 int ret; 424 int ret;
452 425
453 ret = send_control_msg(pdev, request, value, &data, sizeof(data)); 426 pdev->ctrl_buf[0] = data;
427 ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
454 if (ret < 0) 428 if (ret < 0)
455 return ret; 429 return ret;
456 430
@@ -460,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
460int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) 434int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
461{ 435{
462 int ret; 436 int ret;
463 s8 buf;
464 437
465 ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf)); 438 ret = recv_control_msg(pdev, request, value, 1);
466 if (ret < 0) 439 if (ret < 0)
467 return ret; 440 return ret;
468 441
469 *data = buf; 442 *data = ((s8 *)pdev->ctrl_buf)[0];
470 return 0; 443 return 0;
471} 444}
472 445
473int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) 446int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
474{ 447{
475 int ret; 448 int ret;
476 u8 buf[2];
477 449
478 ret = recv_control_msg(pdev, request, value, buf, sizeof(buf)); 450 ret = recv_control_msg(pdev, request, value, 2);
479 if (ret < 0) 451 if (ret < 0)
480 return ret; 452 return ret;
481 453
482 *data = (buf[1] << 8) | buf[0]; 454 *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
483 return 0; 455 return 0;
484} 456}
485 457
486int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data) 458int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
487{ 459{
488 int ret; 460 int ret;
489 u8 buf[2];
490 461
491 buf[0] = data & 0xff; 462 pdev->ctrl_buf[0] = data & 0xff;
492 buf[1] = data >> 8; 463 pdev->ctrl_buf[1] = data >> 8;
493 ret = send_control_msg(pdev, request, value, buf, sizeof(buf)); 464 ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
494 if (ret < 0) 465 if (ret < 0)
495 return ret; 466 return ret;
496 467
@@ -511,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
511/* POWER */ 482/* POWER */
512void pwc_camera_power(struct pwc_device *pdev, int power) 483void pwc_camera_power(struct pwc_device *pdev, int power)
513{ 484{
514 char buf;
515 int r; 485 int r;
516 486
517 if (!pdev->power_save) 487 if (!pdev->power_save)
@@ -521,13 +491,11 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
521 return; /* Not supported by Nala or Timon < release 6 */ 491 return; /* Not supported by Nala or Timon < release 6 */
522 492
523 if (power) 493 if (power)
524 buf = 0x00; /* active */ 494 pdev->ctrl_buf[0] = 0x00; /* active */
525 else 495 else
526 buf = 0xFF; /* power save */ 496 pdev->ctrl_buf[0] = 0xFF; /* power save */
527 r = send_control_msg(pdev, 497 r = send_control_msg(pdev, SET_STATUS_CTL,
528 SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER, 498 SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
529 &buf, sizeof(buf));
530
531 if (r < 0) 499 if (r < 0)
532 PWC_ERROR("Failed to power %s camera (%d)\n", 500 PWC_ERROR("Failed to power %s camera (%d)\n",
533 power ? "on" : "off", r); 501 power ? "on" : "off", r);
@@ -535,7 +503,6 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
535 503
536int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) 504int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
537{ 505{
538 unsigned char buf[2];
539 int r; 506 int r;
540 507
541 if (pdev->type < 730) 508 if (pdev->type < 730)
@@ -551,11 +518,11 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
551 if (off_value > 0xff) 518 if (off_value > 0xff)
552 off_value = 0xff; 519 off_value = 0xff;
553 520
554 buf[0] = on_value; 521 pdev->ctrl_buf[0] = on_value;
555 buf[1] = off_value; 522 pdev->ctrl_buf[1] = off_value;
556 523
557 r = send_control_msg(pdev, 524 r = send_control_msg(pdev,
558 SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf)); 525 SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
559 if (r < 0) 526 if (r < 0)
560 PWC_ERROR("Failed to set LED on/off time (%d)\n", r); 527 PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
561 528
@@ -565,7 +532,6 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
565#ifdef CONFIG_USB_PWC_DEBUG 532#ifdef CONFIG_USB_PWC_DEBUG
566int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor) 533int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
567{ 534{
568 unsigned char buf;
569 int ret = -1, request; 535 int ret = -1, request;
570 536
571 if (pdev->type < 675) 537 if (pdev->type < 675)
@@ -575,14 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
575 else 541 else
576 request = SENSOR_TYPE_FORMATTER2; 542 request = SENSOR_TYPE_FORMATTER2;
577 543
578 ret = recv_control_msg(pdev, 544 ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
579 GET_STATUS_CTL, request, &buf, sizeof(buf));
580 if (ret < 0) 545 if (ret < 0)
581 return ret; 546 return ret;
582 if (pdev->type < 675) 547 if (pdev->type < 675)
583 *sensor = buf | 0x100; 548 *sensor = pdev->ctrl_buf[0] | 0x100;
584 else 549 else
585 *sensor = buf; 550 *sensor = pdev->ctrl_buf[0];
586 return 0; 551 return 0;
587} 552}
588#endif 553#endif
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index be0e02cb487f..e899036aadf4 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,19 +22,11 @@
22 along with this program; if not, write to the Free Software 22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24*/ 24*/
25#include "pwc-dec1.h" 25#include "pwc.h"
26 26
27int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer) 27void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
28{ 28{
29 struct pwc_dec1_private *pdec; 29 struct pwc_dec1_private *pdec = &pdev->dec1;
30 30
31 if (pwc->decompress_data == NULL) { 31 pdec->version = pdev->release;
32 pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
33 if (pdec == NULL)
34 return -ENOMEM;
35 pwc->decompress_data = pdec;
36 }
37 pdec = pwc->decompress_data;
38
39 return 0;
40} 32}
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index a57d8601080b..c565ef8f52fb 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -25,13 +25,15 @@
25#ifndef PWC_DEC1_H 25#ifndef PWC_DEC1_H
26#define PWC_DEC1_H 26#define PWC_DEC1_H
27 27
28#include "pwc.h" 28#include <linux/mutex.h>
29
30struct pwc_device;
29 31
30struct pwc_dec1_private 32struct pwc_dec1_private
31{ 33{
32 int version; 34 int version;
33}; 35};
34 36
35int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer); 37void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
36 38
37#endif 39#endif
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 2c6709112b2f..3792fedff951 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -294,22 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
294 294
295 295
296/* If the type or the command change, we rebuild the lookup table */ 296/* If the type or the command change, we rebuild the lookup table */
297int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd) 297void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
298{ 298{
299 int flags, version, shift, i; 299 int flags, version, shift, i;
300 struct pwc_dec23_private *pdec; 300 struct pwc_dec23_private *pdec = &pdev->dec23;
301
302 if (pwc->decompress_data == NULL) {
303 pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
304 if (pdec == NULL)
305 return -ENOMEM;
306 pwc->decompress_data = pdec;
307 }
308 pdec = pwc->decompress_data;
309 301
310 mutex_init(&pdec->lock); 302 mutex_init(&pdec->lock);
311 303
312 if (DEVICE_USE_CODEC3(type)) { 304 if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
305 return;
306
307 if (DEVICE_USE_CODEC3(pdev->type)) {
313 flags = cmd[2] & 0x18; 308 flags = cmd[2] & 0x18;
314 if (flags == 8) 309 if (flags == 8)
315 pdec->nbits = 7; /* More bits, mean more bits to encode the stream, but better quality */ 310 pdec->nbits = 7; /* More bits, mean more bits to encode the stream, but better quality */
@@ -356,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
356 pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255; 351 pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
357#endif 352#endif
358 353
359 return 0; 354 pdec->last_cmd = cmd[2];
355 pdec->last_cmd_valid = 1;
360} 356}
361 357
362/* 358/*
@@ -659,12 +655,12 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
659 * src: raw data 655 * src: raw data
660 * dst: image output 656 * dst: image output
661 */ 657 */
662void pwc_dec23_decompress(const struct pwc_device *pwc, 658void pwc_dec23_decompress(struct pwc_device *pdev,
663 const void *src, 659 const void *src,
664 void *dst) 660 void *dst)
665{ 661{
666 int bandlines_left, bytes_per_block; 662 int bandlines_left, bytes_per_block;
667 struct pwc_dec23_private *pdec = pwc->decompress_data; 663 struct pwc_dec23_private *pdec = &pdev->dec23;
668 664
669 /* YUV420P image format */ 665 /* YUV420P image format */
670 unsigned char *pout_planar_y; 666 unsigned char *pout_planar_y;
@@ -674,23 +670,22 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
674 670
675 mutex_lock(&pdec->lock); 671 mutex_lock(&pdec->lock);
676 672
677 bandlines_left = pwc->height / 4; 673 bandlines_left = pdev->height / 4;
678 bytes_per_block = pwc->width * 4; 674 bytes_per_block = pdev->width * 4;
679 plane_size = pwc->height * pwc->width; 675 plane_size = pdev->height * pdev->width;
680 676
681 pout_planar_y = dst; 677 pout_planar_y = dst;
682 pout_planar_u = dst + plane_size; 678 pout_planar_u = dst + plane_size;
683 pout_planar_v = dst + plane_size + plane_size / 4; 679 pout_planar_v = dst + plane_size + plane_size / 4;
684 680
685 while (bandlines_left--) { 681 while (bandlines_left--) {
686 DecompressBand23(pwc->decompress_data, 682 DecompressBand23(pdec, src,
687 src,
688 pout_planar_y, pout_planar_u, pout_planar_v, 683 pout_planar_y, pout_planar_u, pout_planar_v,
689 pwc->width, pwc->width); 684 pdev->width, pdev->width);
690 src += pwc->vbandlength; 685 src += pdev->vbandlength;
691 pout_planar_y += bytes_per_block; 686 pout_planar_y += bytes_per_block;
692 pout_planar_u += pwc->width; 687 pout_planar_u += pdev->width;
693 pout_planar_v += pwc->width; 688 pout_planar_v += pdev->width;
694 } 689 }
695 mutex_unlock(&pdec->lock); 690 mutex_unlock(&pdec->lock);
696} 691}
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index d64a3c281af6..c655b1c1e6a9 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -25,17 +25,20 @@
25#ifndef PWC_DEC23_H 25#ifndef PWC_DEC23_H
26#define PWC_DEC23_H 26#define PWC_DEC23_H
27 27
28#include "pwc.h" 28struct pwc_device;
29 29
30struct pwc_dec23_private 30struct pwc_dec23_private
31{ 31{
32 struct mutex lock; 32 struct mutex lock;
33 33
34 unsigned char last_cmd, last_cmd_valid;
35
34 unsigned int scalebits; 36 unsigned int scalebits;
35 unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */ 37 unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
36 38
37 unsigned int reservoir; 39 unsigned int reservoir;
38 unsigned int nbits_in_reservoir; 40 unsigned int nbits_in_reservoir;
41
39 const unsigned char *stream; 42 const unsigned char *stream;
40 int temp_colors[16]; 43 int temp_colors[16];
41 44
@@ -51,8 +54,8 @@ struct pwc_dec23_private
51 54
52}; 55};
53 56
54int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd); 57void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
55void pwc_dec23_decompress(const struct pwc_device *pwc, 58void pwc_dec23_decompress(struct pwc_device *pdev,
56 const void *src, 59 const void *src,
57 void *dst); 60 void *dst);
58#endif 61#endif
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 943d37ad0d33..122fbd0081eb 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -128,18 +128,11 @@ static struct usb_driver pwc_driver = {
128#define MAX_DEV_HINTS 20 128#define MAX_DEV_HINTS 20
129#define MAX_ISOC_ERRORS 20 129#define MAX_ISOC_ERRORS 20
130 130
131static int default_fps = 10;
132#ifdef CONFIG_USB_PWC_DEBUG 131#ifdef CONFIG_USB_PWC_DEBUG
133 int pwc_trace = PWC_DEBUG_LEVEL; 132 int pwc_trace = PWC_DEBUG_LEVEL;
134#endif 133#endif
135static int power_save = -1; 134static int power_save = -1;
136static int led_on = 100, led_off; /* defaults to LED that is on while in use */ 135static int leds[2] = { 100, 0 };
137static struct {
138 int type;
139 char serial_number[30];
140 int device_node;
141 struct pwc_device *pdev;
142} device_hint[MAX_DEV_HINTS];
143 136
144/***/ 137/***/
145 138
@@ -386,8 +379,8 @@ static int pwc_isoc_init(struct pwc_device *pdev)
386retry: 379retry:
387 /* We first try with low compression and then retry with a higher 380 /* We first try with low compression and then retry with a higher
388 compression setting if there is not enough bandwidth. */ 381 compression setting if there is not enough bandwidth. */
389 ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, 382 ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
390 pdev->vframes, &compression); 383 pdev->vframes, &compression, 1);
391 384
392 /* Get the current alternate interface, adjust packet size */ 385 /* Get the current alternate interface, adjust packet size */
393 intf = usb_ifnum_to_if(udev, 0); 386 intf = usb_ifnum_to_if(udev, 0);
@@ -597,23 +590,9 @@ leave:
597static void pwc_video_release(struct v4l2_device *v) 590static void pwc_video_release(struct v4l2_device *v)
598{ 591{
599 struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev); 592 struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
600 int hint;
601
602 /* search device_hint[] table if we occupy a slot, by any chance */
603 for (hint = 0; hint < MAX_DEV_HINTS; hint++)
604 if (device_hint[hint].pdev == pdev)
605 device_hint[hint].pdev = NULL;
606
607 /* Free intermediate decompression buffer & tables */
608 if (pdev->decompress_data != NULL) {
609 PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
610 pdev->decompress_data);
611 kfree(pdev->decompress_data);
612 pdev->decompress_data = NULL;
613 }
614 593
615 v4l2_ctrl_handler_free(&pdev->ctrl_handler); 594 v4l2_ctrl_handler_free(&pdev->ctrl_handler);
616 595 kfree(pdev->ctrl_buf);
617 kfree(pdev); 596 kfree(pdev);
618} 597}
619 598
@@ -758,7 +737,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
758 737
759 /* Turn on camera and set LEDS on */ 738 /* Turn on camera and set LEDS on */
760 pwc_camera_power(pdev, 1); 739 pwc_camera_power(pdev, 1);
761 pwc_set_leds(pdev, led_on, led_off); 740 pwc_set_leds(pdev, leds[0], leds[1]);
762 741
763 r = pwc_isoc_init(pdev); 742 r = pwc_isoc_init(pdev);
764 if (r) { 743 if (r) {
@@ -813,10 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
813 struct usb_device *udev = interface_to_usbdev(intf); 792 struct usb_device *udev = interface_to_usbdev(intf);
814 struct pwc_device *pdev = NULL; 793 struct pwc_device *pdev = NULL;
815 int vendor_id, product_id, type_id; 794 int vendor_id, product_id, type_id;
816 int hint, rc; 795 int rc;
817 int features = 0; 796 int features = 0;
818 int compression = 0; 797 int compression = 0;
819 int video_nr = -1; /* default: use next available device */
820 int my_power_save = power_save; 798 int my_power_save = power_save;
821 char serial_number[30], *name; 799 char serial_number[30], *name;
822 800
@@ -1076,7 +1054,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1076 return -ENOMEM; 1054 return -ENOMEM;
1077 } 1055 }
1078 pdev->type = type_id; 1056 pdev->type = type_id;
1079 pdev->vframes = default_fps;
1080 pdev->features = features; 1057 pdev->features = features;
1081 pwc_construct(pdev); /* set min/max sizes correct */ 1058 pwc_construct(pdev); /* set min/max sizes correct */
1082 1059
@@ -1107,24 +1084,14 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1107 pdev->release = le16_to_cpu(udev->descriptor.bcdDevice); 1084 pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
1108 PWC_DEBUG_PROBE("Release: %04x\n", pdev->release); 1085 PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
1109 1086
1110 /* Now search device_hint[] table for a match, so we can hint a node number. */ 1087 /* Allocate USB command buffers */
1111 for (hint = 0; hint < MAX_DEV_HINTS; hint++) { 1088 pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
1112 if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) && 1089 if (!pdev->ctrl_buf) {
1113 (device_hint[hint].pdev == NULL)) { 1090 PWC_ERROR("Oops, could not allocate memory for pwc_device.\n");
1114 /* so far, so good... try serial number */ 1091 rc = -ENOMEM;
1115 if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) { 1092 goto err_free_mem;
1116 /* match! */
1117 video_nr = device_hint[hint].device_node;
1118 PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
1119 break;
1120 }
1121 }
1122 } 1093 }
1123 1094
1124 /* occupy slot */
1125 if (hint < MAX_DEV_HINTS)
1126 device_hint[hint].pdev = pdev;
1127
1128#ifdef CONFIG_USB_PWC_DEBUG 1095#ifdef CONFIG_USB_PWC_DEBUG
1129 /* Query sensor type */ 1096 /* Query sensor type */
1130 if (pwc_get_cmos_sensor(pdev, &rc) >= 0) { 1097 if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1138,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1138 pwc_set_leds(pdev, 0, 0); 1105 pwc_set_leds(pdev, 0, 0);
1139 1106
1140 /* Setup intial videomode */ 1107 /* Setup intial videomode */
1141 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes, 1108 rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
1142 &compression); 1109 V4L2_PIX_FMT_YUV420, 30, &compression, 1);
1143 if (rc) 1110 if (rc)
1144 goto err_free_mem; 1111 goto err_free_mem;
1145 1112
@@ -1164,7 +1131,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1164 pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler; 1131 pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
1165 pdev->vdev.v4l2_dev = &pdev->v4l2_dev; 1132 pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
1166 1133
1167 rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr); 1134 rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
1168 if (rc < 0) { 1135 if (rc < 0) {
1169 PWC_ERROR("Failed to register as video device (%d).\n", rc); 1136 PWC_ERROR("Failed to register as video device (%d).\n", rc);
1170 goto err_unregister_v4l2_dev; 1137 goto err_unregister_v4l2_dev;
@@ -1207,8 +1174,7 @@ err_unregister_v4l2_dev:
1207err_free_controls: 1174err_free_controls:
1208 v4l2_ctrl_handler_free(&pdev->ctrl_handler); 1175 v4l2_ctrl_handler_free(&pdev->ctrl_handler);
1209err_free_mem: 1176err_free_mem:
1210 if (hint < MAX_DEV_HINTS) 1177 kfree(pdev->ctrl_buf);
1211 device_hint[hint].pdev = NULL;
1212 kfree(pdev); 1178 kfree(pdev);
1213 return rc; 1179 return rc;
1214} 1180}
@@ -1243,27 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
1243 * Initialization code & module stuff 1209 * Initialization code & module stuff
1244 */ 1210 */
1245 1211
1246static int fps;
1247static int leds[2] = { -1, -1 };
1248static unsigned int leds_nargs; 1212static unsigned int leds_nargs;
1249static char *dev_hint[MAX_DEV_HINTS];
1250static unsigned int dev_hint_nargs;
1251 1213
1252module_param(fps, int, 0444);
1253#ifdef CONFIG_USB_PWC_DEBUG 1214#ifdef CONFIG_USB_PWC_DEBUG
1254module_param_named(trace, pwc_trace, int, 0644); 1215module_param_named(trace, pwc_trace, int, 0644);
1255#endif 1216#endif
1256module_param(power_save, int, 0644); 1217module_param(power_save, int, 0644);
1257module_param_array(leds, int, &leds_nargs, 0444); 1218module_param_array(leds, int, &leds_nargs, 0444);
1258module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
1259 1219
1260MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
1261#ifdef CONFIG_USB_PWC_DEBUG 1220#ifdef CONFIG_USB_PWC_DEBUG
1262MODULE_PARM_DESC(trace, "For debugging purposes"); 1221MODULE_PARM_DESC(trace, "For debugging purposes");
1263#endif 1222#endif
1264MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off"); 1223MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
1265MODULE_PARM_DESC(leds, "LED on,off time in milliseconds"); 1224MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
1266MODULE_PARM_DESC(dev_hint, "Device node hints");
1267 1225
1268MODULE_DESCRIPTION("Philips & OEM USB webcam driver"); 1226MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
1269MODULE_AUTHOR("Luc Saillard <luc@saillard.org>"); 1227MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
@@ -1273,114 +1231,13 @@ MODULE_VERSION( PWC_VERSION );
1273 1231
1274static int __init usb_pwc_init(void) 1232static int __init usb_pwc_init(void)
1275{ 1233{
1276 int i;
1277
1278#ifdef CONFIG_USB_PWC_DEBUG
1279 PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
1280 PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
1281 PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
1282 PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
1283
1284 if (pwc_trace >= 0) {
1285 PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
1286 }
1287#endif
1288
1289 if (fps) {
1290 if (fps < 4 || fps > 30) {
1291 PWC_ERROR("Framerate out of bounds (4-30).\n");
1292 return -EINVAL;
1293 }
1294 default_fps = fps;
1295 PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
1296 }
1297
1298 if (leds[0] >= 0)
1299 led_on = leds[0];
1300 if (leds[1] >= 0)
1301 led_off = leds[1];
1302
1303 /* Big device node whoopla. Basically, it allows you to assign a
1304 device node (/dev/videoX) to a camera, based on its type
1305 & serial number. The format is [type[.serialnumber]:]node.
1306
1307 Any camera that isn't matched by these rules gets the next
1308 available free device node.
1309 */
1310 for (i = 0; i < MAX_DEV_HINTS; i++) {
1311 char *s, *colon, *dot;
1312
1313 /* This loop also initializes the array */
1314 device_hint[i].pdev = NULL;
1315 s = dev_hint[i];
1316 if (s != NULL && *s != '\0') {
1317 device_hint[i].type = -1; /* wildcard */
1318 strcpy(device_hint[i].serial_number, "*");
1319
1320 /* parse string: chop at ':' & '/' */
1321 colon = dot = s;
1322 while (*colon != '\0' && *colon != ':')
1323 colon++;
1324 while (*dot != '\0' && *dot != '.')
1325 dot++;
1326 /* Few sanity checks */
1327 if (*dot != '\0' && dot > colon) {
1328 PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
1329 return -EINVAL;
1330 }
1331
1332 if (*colon == '\0') {
1333 /* No colon */
1334 if (*dot != '\0') {
1335 PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
1336 return -EINVAL;
1337 }
1338 else {
1339 /* No type or serial number specified, just a number. */
1340 device_hint[i].device_node =
1341 simple_strtol(s, NULL, 10);
1342 }
1343 }
1344 else {
1345 /* There's a colon, so we have at least a type and a device node */
1346 device_hint[i].type =
1347 simple_strtol(s, NULL, 10);
1348 device_hint[i].device_node =
1349 simple_strtol(colon + 1, NULL, 10);
1350 if (*dot != '\0') {
1351 /* There's a serial number as well */
1352 int k;
1353
1354 dot++;
1355 k = 0;
1356 while (*dot != ':' && k < 29) {
1357 device_hint[i].serial_number[k++] = *dot;
1358 dot++;
1359 }
1360 device_hint[i].serial_number[k] = '\0';
1361 }
1362 }
1363 PWC_TRACE("device_hint[%d]:\n", i);
1364 PWC_TRACE(" type : %d\n", device_hint[i].type);
1365 PWC_TRACE(" serial# : %s\n", device_hint[i].serial_number);
1366 PWC_TRACE(" node : %d\n", device_hint[i].device_node);
1367 }
1368 else
1369 device_hint[i].type = 0; /* not filled */
1370 } /* ..for MAX_DEV_HINTS */
1371
1372 PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
1373 return usb_register(&pwc_driver); 1234 return usb_register(&pwc_driver);
1374} 1235}
1375 1236
1376static void __exit usb_pwc_exit(void) 1237static void __exit usb_pwc_exit(void)
1377{ 1238{
1378 PWC_DEBUG_MODULE("Deregistering driver.\n");
1379 usb_deregister(&pwc_driver); 1239 usb_deregister(&pwc_driver);
1380 PWC_INFO("Philips webcam module removed.\n");
1381} 1240}
1382 1241
1383module_init(usb_pwc_init); 1242module_init(usb_pwc_init);
1384module_exit(usb_pwc_exit); 1243module_exit(usb_pwc_exit);
1385
1386/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 23a55b5814fc..9be5adffa874 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -90,5 +90,4 @@ void pwc_construct(struct pwc_device *pdev)
90 pdev->frame_header_size = 0; 90 pdev->frame_header_size = 0;
91 pdev->frame_trailer_size = 0; 91 pdev->frame_trailer_size = 0;
92 } 92 }
93 pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
94} 93}
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 80e25842e84a..f495eeb5403a 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -493,16 +493,11 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
493 (pixelformat>>24)&255); 493 (pixelformat>>24)&255);
494 494
495 ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height, 495 ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
496 pdev->vframes, &compression); 496 pixelformat, 30, &compression, 0);
497 497
498 PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret); 498 PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
499 499
500 if (ret == 0) { 500 pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
501 pdev->pixfmt = pixelformat;
502 pwc_vidioc_fill_fmt(f, pdev->width, pdev->height,
503 pdev->pixfmt);
504 }
505
506leave: 501leave:
507 mutex_unlock(&pdev->udevlock); 502 mutex_unlock(&pdev->udevlock);
508 return ret; 503 return ret;
@@ -777,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
777static int pwc_set_motor(struct pwc_device *pdev) 772static int pwc_set_motor(struct pwc_device *pdev)
778{ 773{
779 int ret; 774 int ret;
780 u8 buf[4];
781 775
782 buf[0] = 0; 776 pdev->ctrl_buf[0] = 0;
783 if (pdev->motor_pan_reset->is_new) 777 if (pdev->motor_pan_reset->is_new)
784 buf[0] |= 0x01; 778 pdev->ctrl_buf[0] |= 0x01;
785 if (pdev->motor_tilt_reset->is_new) 779 if (pdev->motor_tilt_reset->is_new)
786 buf[0] |= 0x02; 780 pdev->ctrl_buf[0] |= 0x02;
787 if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) { 781 if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
788 ret = send_control_msg(pdev, SET_MPT_CTL, 782 ret = send_control_msg(pdev, SET_MPT_CTL,
789 PT_RESET_CONTROL_FORMATTER, buf, 1); 783 PT_RESET_CONTROL_FORMATTER,
784 pdev->ctrl_buf, 1);
790 if (ret < 0) 785 if (ret < 0)
791 return ret; 786 return ret;
792 } 787 }
793 788
794 memset(buf, 0, sizeof(buf)); 789 memset(pdev->ctrl_buf, 0, 4);
795 if (pdev->motor_pan->is_new) { 790 if (pdev->motor_pan->is_new) {
796 buf[0] = pdev->motor_pan->val & 0xFF; 791 pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
797 buf[1] = (pdev->motor_pan->val >> 8); 792 pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
798 } 793 }
799 if (pdev->motor_tilt->is_new) { 794 if (pdev->motor_tilt->is_new) {
800 buf[2] = pdev->motor_tilt->val & 0xFF; 795 pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
801 buf[3] = (pdev->motor_tilt->val >> 8); 796 pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
802 } 797 }
803 if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) { 798 if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
804 ret = send_control_msg(pdev, SET_MPT_CTL, 799 ret = send_control_msg(pdev, SET_MPT_CTL,
805 PT_RELATIVE_CONTROL_FORMATTER, 800 PT_RELATIVE_CONTROL_FORMATTER,
806 buf, sizeof(buf)); 801 pdev->ctrl_buf, 4);
807 if (ret < 0) 802 if (ret < 0)
808 return ret; 803 return ret;
809 } 804 }
@@ -1094,6 +1089,63 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
1094 return 0; 1089 return 0;
1095} 1090}
1096 1091
1092static int pwc_g_parm(struct file *file, void *fh,
1093 struct v4l2_streamparm *parm)
1094{
1095 struct pwc_device *pdev = video_drvdata(file);
1096
1097 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1098 return -EINVAL;
1099
1100 memset(parm, 0, sizeof(*parm));
1101
1102 parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1103 parm->parm.capture.readbuffers = MIN_FRAMES;
1104 parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
1105 parm->parm.capture.timeperframe.denominator = pdev->vframes;
1106 parm->parm.capture.timeperframe.numerator = 1;
1107
1108 return 0;
1109}
1110
1111static int pwc_s_parm(struct file *file, void *fh,
1112 struct v4l2_streamparm *parm)
1113{
1114 struct pwc_device *pdev = video_drvdata(file);
1115 int compression = 0;
1116 int ret, fps;
1117
1118 if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1119 parm->parm.capture.timeperframe.numerator == 0)
1120 return -EINVAL;
1121
1122 if (pwc_test_n_set_capt_file(pdev, file))
1123 return -EBUSY;
1124
1125 fps = parm->parm.capture.timeperframe.denominator /
1126 parm->parm.capture.timeperframe.numerator;
1127
1128 mutex_lock(&pdev->udevlock);
1129 if (!pdev->udev) {
1130 ret = -ENODEV;
1131 goto leave;
1132 }
1133
1134 if (pdev->iso_init) {
1135 ret = -EBUSY;
1136 goto leave;
1137 }
1138
1139 ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
1140 fps, &compression, 0);
1141
1142 pwc_g_parm(file, fh, parm);
1143
1144leave:
1145 mutex_unlock(&pdev->udevlock);
1146 return ret;
1147}
1148
1097static int pwc_log_status(struct file *file, void *priv) 1149static int pwc_log_status(struct file *file, void *priv)
1098{ 1150{
1099 struct pwc_device *pdev = video_drvdata(file); 1151 struct pwc_device *pdev = video_drvdata(file);
@@ -1120,4 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
1120 .vidioc_log_status = pwc_log_status, 1172 .vidioc_log_status = pwc_log_status,
1121 .vidioc_enum_framesizes = pwc_enum_framesizes, 1173 .vidioc_enum_framesizes = pwc_enum_framesizes,
1122 .vidioc_enum_frameintervals = pwc_enum_frameintervals, 1174 .vidioc_enum_frameintervals = pwc_enum_frameintervals,
1175 .vidioc_g_parm = pwc_g_parm,
1176 .vidioc_s_parm = pwc_s_parm,
1123}; 1177};
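
The pwc_g_parm/pwc_s_parm handlers added above expose the frame rate through the standard V4L2 streaming-parameter ioctls: pwc_s_parm derives fps as timeperframe.denominator / timeperframe.numerator and passes it to pwc_set_video_mode(). A minimal userspace sketch of that path follows; it is illustrative only, not part of the patch, and the /dev/video0 node and the 15 fps request are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_streamparm parm;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* pwc_s_parm() rejects other types */
	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = 15;	/* request 15 fps */

	if (ioctl(fd, VIDIOC_S_PARM, &parm) < 0)
		perror("VIDIOC_S_PARM");
	else
		printf("granted %u/%u s per frame\n",
		       parm.parm.capture.timeperframe.numerator,
		       parm.parm.capture.timeperframe.denominator);

	close(fd);
	return 0;
}
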
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index 47c518fef179..e4d4d711dd1f 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -44,6 +44,8 @@
44#ifdef CONFIG_USB_PWC_INPUT_EVDEV 44#ifdef CONFIG_USB_PWC_INPUT_EVDEV
45#include <linux/input.h> 45#include <linux/input.h>
46#endif 46#endif
47#include "pwc-dec1.h"
48#include "pwc-dec23.h"
47 49
48/* Version block */ 50/* Version block */
49#define PWC_VERSION "10.0.15" 51#define PWC_VERSION "10.0.15"
@@ -132,9 +134,6 @@
132#define DEVICE_USE_CODEC3(x) ((x)>=700) 134#define DEVICE_USE_CODEC3(x) ((x)>=700)
133#define DEVICE_USE_CODEC23(x) ((x)>=675) 135#define DEVICE_USE_CODEC23(x) ((x)>=675)
134 136
135/* from pwc-dec.h */
136#define PWCX_FLAG_PLANAR 0x0001
137
138/* Request types: video */ 137/* Request types: video */
139#define SET_LUM_CTL 0x01 138#define SET_LUM_CTL 0x01
140#define GET_LUM_CTL 0x02 139#define GET_LUM_CTL 0x02
@@ -248,8 +247,8 @@ struct pwc_device
248 char vmirror; /* for ToUCaM series */ 247 char vmirror; /* for ToUCaM series */
249 char power_save; /* Do powersaving for this cam */ 248 char power_save; /* Do powersaving for this cam */
250 249
251 int cmd_len;
252 unsigned char cmd_buf[13]; 250 unsigned char cmd_buf[13];
251 unsigned char *ctrl_buf;
253 252
254 struct urb *urbs[MAX_ISO_BUFS]; 253 struct urb *urbs[MAX_ISO_BUFS];
255 char iso_init; 254 char iso_init;
@@ -272,7 +271,10 @@ struct pwc_device
272 int frame_total_size; /* including header & trailer */ 271 int frame_total_size; /* including header & trailer */
273 int drop_frames; 272 int drop_frames;
274 273
275 void *decompress_data; /* private data for decompression engine */ 274 union { /* private data for decompression engine */
275 struct pwc_dec1_private dec1;
276 struct pwc_dec23_private dec23;
277 };
276 278
277 /* 279 /*
278 * We have an 'image' and a 'view', where 'image' is the fixed-size img 280 * We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -364,7 +366,7 @@ void pwc_construct(struct pwc_device *pdev);
364/** Functions in pwc-ctrl.c */ 366/** Functions in pwc-ctrl.c */
365/* Request a certain video mode. Returns < 0 if not possible */ 367/* Request a certain video mode. Returns < 0 if not possible */
366extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, 368extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
367 int frames, int *compression); 369 int pixfmt, int frames, int *compression, int send_to_cam);
368extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size); 370extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
369extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value); 371extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
370extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor); 372extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
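
The pwc.h change above replaces the separately allocated decompress_data pointer with an anonymous union that embeds both decoder states directly in struct pwc_device, so no extra allocation or cast is needed. A generic sketch of the pattern, using hypothetical demo types rather than the real pwc_dec1_private/pwc_dec23_private structures:

/* Hypothetical stand-ins for the per-codec decoder state. */
struct demo_dec1_state { int scratch[16]; };
struct demo_dec23_state { int tables[64]; };

struct demo_device {
	int type;		/* selects which union member is valid */
	union {			/* storage is sized for the largest member,
				 * so one embedded block serves both codecs */
		struct demo_dec1_state dec1;
		struct demo_dec23_state dec23;
	};
};

static void demo_use(struct demo_device *dev)
{
	dev->dec1.scratch[0] = 0;	/* members are addressed directly, no cast */
}
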
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 510cfab477ff..a9e9653beeb4 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -693,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
693 mf->code = 0; 693 mf->code = 0;
694 continue; 694 continue;
695 } 695 }
696 if (mf->width != tfmt->width || mf->width != tfmt->width) { 696 if (mf->width != tfmt->width || mf->height != tfmt->height) {
697 u32 fcc = ffmt->fourcc; 697 u32 fcc = ffmt->fourcc;
698 tfmt->width = mf->width; 698 tfmt->width = mf->width;
699 tfmt->height = mf->height; 699 tfmt->height = mf->height;
@@ -702,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
702 NULL, &fcc, FIMC_SD_PAD_SOURCE); 702 NULL, &fcc, FIMC_SD_PAD_SOURCE);
703 if (ffmt && ffmt->mbus_code) 703 if (ffmt && ffmt->mbus_code)
704 mf->code = ffmt->mbus_code; 704 mf->code = ffmt->mbus_code;
705 if (mf->width != tfmt->width || mf->width != tfmt->width) 705 if (mf->width != tfmt->width ||
706 mf->height != tfmt->height)
706 continue; 707 continue;
707 tfmt->code = mf->code; 708 tfmt->code = mf->code;
708 } 709 }
@@ -710,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
710 ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt); 711 ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
711 712
712 if (mf->code == tfmt->code && 713 if (mf->code == tfmt->code &&
713 mf->width == tfmt->width && mf->width == tfmt->width) 714 mf->width == tfmt->width && mf->height == tfmt->height)
714 break; 715 break;
715 } 716 }
716 717
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index f5cbb8a4c540..81bcbb9492ea 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -848,11 +848,11 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
848 v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4); 848 v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
849 849
850 ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, 850 ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
851 V4L2_CID_HFLIP, 0, 1, 1, 0); 851 V4L2_CID_ROTATE, 0, 270, 90, 0);
852 ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, 852 ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
853 V4L2_CID_VFLIP, 0, 1, 1, 0); 853 V4L2_CID_HFLIP, 0, 1, 1, 0);
854 ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, 854 ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
855 V4L2_CID_ROTATE, 0, 270, 90, 0); 855 V4L2_CID_VFLIP, 0, 1, 1, 0);
856 if (variant->has_alpha) 856 if (variant->has_alpha)
857 ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler, 857 ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
858 &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 858 &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 615c862f0360..8ea4ee116e46 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -21,7 +21,6 @@
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/version.h>
25#include <media/v4l2-ctrls.h> 24#include <media/v4l2-ctrls.h>
26#include <media/media-device.h> 25#include <media/media-device.h>
27 26
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
index c40b0dde1883..febaa673d363 100644
--- a/drivers/media/video/s5p-g2d/g2d.c
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -184,6 +184,7 @@ static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
184 ctx->rop = ROP4_INVERT; 184 ctx->rop = ROP4_INVERT;
185 else 185 else
186 ctx->rop = ROP4_COPY; 186 ctx->rop = ROP4_COPY;
187 break;
187 default: 188 default:
188 v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n"); 189 v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
189 return -EINVAL; 190 return -EINVAL;
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
index f841a3e9845c..1105a8749c8b 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -989,9 +989,10 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
989 * ============================================================================ 989 * ============================================================================
990 */ 990 */
991 991
992static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, 992static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
993 unsigned int *nplanes, unsigned int sizes[], 993 const struct v4l2_format *fmt,
994 void *alloc_ctxs[]) 994 unsigned int *nbuffers, unsigned int *nplanes,
995 unsigned int sizes[], void *alloc_ctxs[])
995{ 996{
996 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq); 997 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
997 struct s5p_jpeg_q_data *q_data = NULL; 998 struct s5p_jpeg_q_data *q_data = NULL;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index e43e128baf5f..83fe461af263 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -18,7 +18,6 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/version.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/workqueue.h> 22#include <linux/workqueue.h>
24#include <media/videobuf2-core.h> 23#include <media/videobuf2-core.h>
@@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
475 ctx->mv_size = 0; 474 ctx->mv_size = 0;
476 } 475 }
477 ctx->dpb_count = s5p_mfc_get_dpb_count(); 476 ctx->dpb_count = s5p_mfc_get_dpb_count();
478 if (ctx->img_width == 0 || ctx->img_width == 0) 477 if (ctx->img_width == 0 || ctx->img_height == 0)
479 ctx->state = MFCINST_ERROR; 478 ctx->state = MFCINST_ERROR;
480 else 479 else
481 ctx->state = MFCINST_HEAD_PARSED; 480 ctx->state = MFCINST_HEAD_PARSED;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 844a4d7797bc..c25ec022d267 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
165 .maximum = 32, 165 .maximum = 32,
166 .step = 1, 166 .step = 1,
167 .default_value = 1, 167 .default_value = 1,
168 .flags = V4L2_CTRL_FLAG_VOLATILE, 168 .is_volatile = 1,
169 }, 169 },
170}; 170};
171 171
diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c
index 971591d6450f..5b72da5ce418 100644
--- a/drivers/media/video/saa7164/saa7164-cards.c
+++ b/drivers/media/video/saa7164/saa7164-cards.c
@@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = {
269 .portb = SAA7164_MPEG_DVB, 269 .portb = SAA7164_MPEG_DVB,
270 .portc = SAA7164_MPEG_ENCODER, 270 .portc = SAA7164_MPEG_ENCODER,
271 .portd = SAA7164_MPEG_ENCODER, 271 .portd = SAA7164_MPEG_ENCODER,
272 .portc = SAA7164_MPEG_ENCODER,
273 .portd = SAA7164_MPEG_ENCODER,
274 .porte = SAA7164_MPEG_VBI, 272 .porte = SAA7164_MPEG_VBI,
275 .portf = SAA7164_MPEG_VBI, 273 .portf = SAA7164_MPEG_VBI,
276 .chiprev = SAA7164_CHIP_REV3, 274 .chiprev = SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = {
333 .portd = SAA7164_MPEG_ENCODER, 331 .portd = SAA7164_MPEG_ENCODER,
334 .porte = SAA7164_MPEG_VBI, 332 .porte = SAA7164_MPEG_VBI,
335 .portf = SAA7164_MPEG_VBI, 333 .portf = SAA7164_MPEG_VBI,
336 .porte = SAA7164_MPEG_VBI,
337 .portf = SAA7164_MPEG_VBI,
338 .chiprev = SAA7164_CHIP_REV3, 334 .chiprev = SAA7164_CHIP_REV3,
339 .unit = {{ 335 .unit = {{
340 .id = 0x28, 336 .id = 0x28,
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 129f135d5a5f..c096b3f74200 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
374} 374}
375#endif 375#endif
376 376
377static bool check_firmware(struct usb_device *udev, int *down_firmware) 377static int check_firmware(struct usb_device *udev, int *down_firmware)
378{ 378{
379 void *buf; 379 void *buf;
380 int ret; 380 int ret;
@@ -398,7 +398,7 @@ static bool check_firmware(struct usb_device *udev, int *down_firmware)
398 *down_firmware = 1; 398 *down_firmware = 1;
399 return firmware_download(udev); 399 return firmware_download(udev);
400 } 400 }
401 return ret; 401 return 0;
402} 402}
403 403
404static int poseidon_probe(struct usb_interface *interface, 404static int poseidon_probe(struct usb_interface *interface,
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index da1f4c2d2d4b..cccd42be718a 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -465,8 +465,8 @@ const char *v4l2_ctrl_get_name(u32 id)
465 case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; 465 case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
466 case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; 466 case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
467 case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; 467 case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
468 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers"; 468 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
469 case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers"; 469 case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
470 case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component"; 470 case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
471 471
472 /* MPEG controls */ 472 /* MPEG controls */
@@ -506,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id)
506 case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; 506 case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
507 case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; 507 case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
508 case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; 508 case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
509 case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs"; 509 case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
510 case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; 510 case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
511 case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; 511 case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
512 case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode"; 512 case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
513 case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Picture"; 513 case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
514 case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value"; 514 case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
515 case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value"; 515 case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
516 case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value"; 516 case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
517 case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value"; 517 case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
518 case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value"; 518 case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
519 case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value"; 519 case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
520 case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value"; 520 case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
521 case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value"; 521 case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
522 case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value"; 522 case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
523 case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value"; 523 case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
524 case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable"; 524 case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
525 case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size"; 525 case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
526 case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entorpy Mode"; 526 case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
527 case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period"; 527 case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
528 case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level"; 528 case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
529 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset"; 529 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
530 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset"; 530 case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
@@ -535,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id)
535 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; 535 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
536 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; 536 case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
537 case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; 537 case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
538 case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value"; 538 case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
539 case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value"; 539 case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
540 case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value"; 540 case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
541 case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value"; 541 case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
542 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level"; 542 case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
543 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile"; 543 case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
544 case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable"; 544 case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
545 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice"; 545 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
546 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice"; 546 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
547 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method"; 547 case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
548 case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; 548 case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
549 549
550 /* CAMERA controls */ 550 /* CAMERA controls */
@@ -580,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id)
580 case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled"; 580 case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
581 case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time"; 581 case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
582 case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation"; 582 case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
583 case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled"; 583 case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
584 case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain"; 584 case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
585 case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold"; 585 case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
586 case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time"; 586 case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -588,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id)
588 case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled"; 588 case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
589 case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation"; 589 case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
590 case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency"; 590 case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
591 case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-emphasis settings"; 591 case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
592 case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; 592 case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
593 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; 593 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
594 594
595 /* Flash controls */ 595 /* Flash controls */
596 case V4L2_CID_FLASH_CLASS: return "Flash controls"; 596 case V4L2_CID_FLASH_CLASS: return "Flash Controls";
597 case V4L2_CID_FLASH_LED_MODE: return "LED mode"; 597 case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
598 case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source"; 598 case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
599 case V4L2_CID_FLASH_STROBE: return "Strobe"; 599 case V4L2_CID_FLASH_STROBE: return "Strobe";
600 case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe"; 600 case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
601 case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status"; 601 case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
602 case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout"; 602 case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
603 case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode"; 603 case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
604 case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode"; 604 case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
605 case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator"; 605 case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
606 case V4L2_CID_FLASH_FAULT: return "Faults"; 606 case V4L2_CID_FLASH_FAULT: return "Faults";
607 case V4L2_CID_FLASH_CHARGE: return "Charge"; 607 case V4L2_CID_FLASH_CHARGE: return "Charge";
608 case V4L2_CID_FLASH_READY: return "Ready to strobe"; 608 case V4L2_CID_FLASH_READY: return "Ready to Strobe";
609 609
610 default: 610 default:
611 return NULL; 611 return NULL;
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 77feeb67e2db..3f623859a337 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1871,6 +1871,7 @@ static long __video_do_ioctl(struct file *file,
1871 case VIDIOC_S_FREQUENCY: 1871 case VIDIOC_S_FREQUENCY:
1872 { 1872 {
1873 struct v4l2_frequency *p = arg; 1873 struct v4l2_frequency *p = arg;
1874 enum v4l2_tuner_type type;
1874 1875
1875 if (!ops->vidioc_s_frequency) 1876 if (!ops->vidioc_s_frequency)
1876 break; 1877 break;
@@ -1878,9 +1879,14 @@ static long __video_do_ioctl(struct file *file,
1878 ret = ret_prio; 1879 ret = ret_prio;
1879 break; 1880 break;
1880 } 1881 }
1882 type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
1883 V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
1881 dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n", 1884 dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
1882 p->tuner, p->type, p->frequency); 1885 p->tuner, p->type, p->frequency);
1883 ret = ops->vidioc_s_frequency(file, fh, p); 1886 if (p->type != type)
1887 ret = -EINVAL;
1888 else
1889 ret = ops->vidioc_s_frequency(file, fh, p);
1884 break; 1890 break;
1885 } 1891 }
1886 case VIDIOC_G_SLICED_VBI_CAP: 1892 case VIDIOC_G_SLICED_VBI_CAP:
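
With the check added above, the core returns -EINVAL when the tuner type in a VIDIOC_S_FREQUENCY request does not match the node it is issued on: radio nodes expect V4L2_TUNER_RADIO, video nodes V4L2_TUNER_ANALOG_TV. A hedged userspace sketch follows; the /dev/radio0 node, the 100 MHz value, and the assumption that the tuner reports V4L2_TUNER_CAP_LOW (62.5 Hz units) are illustrative, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency freq;
	int fd = open("/dev/radio0", O_RDWR);	/* assumed radio node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&freq, 0, sizeof(freq));
	freq.tuner = 0;
	freq.type = V4L2_TUNER_RADIO;	/* must match the device type or -EINVAL */
	freq.frequency = 100 * 16000;	/* 100 MHz, assuming 62.5 Hz units */

	if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq) < 0)
		perror("VIDIOC_S_FREQUENCY");

	close(fd);
	return 0;
}
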
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index f6d26419445e..4c09ab781ec3 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
1958 mutex_unlock(&zr->resource_lock); 1958 mutex_unlock(&zr->resource_lock);
1959 fb->fmt.colorspace = V4L2_COLORSPACE_SRGB; 1959 fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
1960 fb->fmt.field = V4L2_FIELD_INTERLACED; 1960 fb->fmt.field = V4L2_FIELD_INTERLACED;
1961 fb->flags = V4L2_FBUF_FLAG_OVERLAY;
1962 fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING; 1961 fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
1963 1962
1964 return 0; 1963 return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 97fff785e97e..af295bb21d62 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2802,7 +2802,7 @@ pci_intx(struct pci_dev *pdev, int enable)
2802 2802
2803/** 2803/**
2804 * pci_intx_mask_supported - probe for INTx masking support 2804 * pci_intx_mask_supported - probe for INTx masking support
2805 * @pdev: the PCI device to operate on 2805 * @dev: the PCI device to operate on
2806 * 2806 *
2807 * Check if the device dev support INTx masking via the config space 2807 * Check if the device dev support INTx masking via the config space
2808 * command word. 2808 * command word.
@@ -2884,7 +2884,7 @@ done:
2884 2884
2885/** 2885/**
2886 * pci_check_and_mask_intx - mask INTx on pending interrupt 2886 * pci_check_and_mask_intx - mask INTx on pending interrupt
2887 * @pdev: the PCI device to operate on 2887 * @dev: the PCI device to operate on
2888 * 2888 *
2889 * Check if the device dev has its INTx line asserted, mask it and 2889 * Check if the device dev has its INTx line asserted, mask it and
2890 * return true in that case. False is returned if not interrupt was 2890 * return true in that case. False is returned if not interrupt was
@@ -2898,7 +2898,7 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2898 2898
2899/** 2899/**
2900 * pci_check_and_mask_intx - unmask INTx of no interrupt is pending 2900 * pci_check_and_mask_intx - unmask INTx of no interrupt is pending
2901 * @pdev: the PCI device to operate on 2901 * @dev: the PCI device to operate on
2902 * 2902 *
2903 * Check if the device dev has its INTx line asserted, unmask it if not 2903 * Check if the device dev has its INTx line asserted, unmask it if not
2904 * and return true. False is returned and the mask remains active if 2904 * and return true. False is returned and the mask remains active if
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ca86f39a0fdc..e9a83f84adaf 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2731,6 +2731,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
2731 * @dev: struct device for the regulator 2731 * @dev: struct device for the regulator
2732 * @init_data: platform provided init data, passed through by driver 2732 * @init_data: platform provided init data, passed through by driver
2733 * @driver_data: private regulator data 2733 * @driver_data: private regulator data
2734 * @of_node: OpenFirmware node to parse for device tree bindings (may be
2735 * NULL).
2734 * 2736 *
2735 * Called by regulator drivers to register a regulator. 2737 * Called by regulator drivers to register a regulator.
2736 * Returns 0 on success. 2738 * Returns 0 on success.
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06ea3bcfdd2a..16570aa84aac 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@ config SCSI_ISCI
830 tristate "Intel(R) C600 Series Chipset SAS Controller" 830 tristate "Intel(R) C600 Series Chipset SAS Controller"
831 depends on PCI && SCSI 831 depends on PCI && SCSI
832 depends on X86 832 depends on X86
833 # (temporary): known alpha quality driver
834 depends on EXPERIMENTAL
835 select SCSI_SAS_LIBSAS 833 select SCSI_SAS_LIBSAS
836 select SCSI_SAS_HOST_SMP
837 ---help--- 834 ---help---
838 This driver supports the 6Gb/s SAS capabilities of the storage 835 This driver supports the 6Gb/s SAS capabilities of the storage
839 control unit found in the Intel(R) C600 series chipset. 836 control unit found in the Intel(R) C600 series chipset.
840 837
841 The experimental tag will be removed after the driver exits alpha
842
843config SCSI_GENERIC_NCR5380 838config SCSI_GENERIC_NCR5380
844 tristate "Generic NCR5380/53c400 SCSI PIO support" 839 tristate "Generic NCR5380/53c400 SCSI PIO support"
845 depends on ISA && SCSI 840 depends on ISA && SCSI
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 78963be2c4fb..cb07c628b2f1 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
673 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ 673 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
674 u32 tm_cleanups; /* TM cleanup requests */ 674 u32 tm_cleanups; /* TM cleanup requests */
675 u32 tm_cleanup_comps; /* TM cleanup completions */ 675 u32 tm_cleanup_comps; /* TM cleanup completions */
676 u32 lm_lun_across_sg; /* LM lun is across sg data buf */ 676 u32 rsvd[6];
677 u32 lm_lun_not_sup; /* LM lun not supported */
678 u32 lm_rpl_data_changed; /* LM report-lun data changed */
679 u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
680 u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
681 u32 lm_lun_not_rdy; /* LM lun not ready */
682}; 677};
683 678
684/* Modify char* port_stt[] in bfal_port.c if a new state was added */ 679/* Modify char* port_stt[] in bfal_port.c if a new state was added */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c86195..8d0b88f67a38 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
56 56
57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ 57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
58 58
59#define SCSI_SENSE_CUR_ERR 0x70
60#define SCSI_SENSE_DEF_ERR 0x71
61
62/*
63 * SCSI additional sense codes
64 */
65#define SCSI_ASC_LUN_NOT_READY 0x04
66#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
67#define SCSI_ASC_TOCC 0x3F
68
69/*
70 * SCSI additional sense code qualifiers
71 */
72#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
73#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
74
75/*
76 * Methods of reporting informational exceptions
77 */
78#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
79
80struct scsi_report_luns_data_s {
81 u32 lun_list_length; /* length of LUN list length */
82 u32 reserved;
83 struct scsi_lun lun[1]; /* first LUN in lun list */
84};
85
86struct scsi_inquiry_vendor_s {
87 u8 vendor_id[8];
88};
89
90struct scsi_inquiry_prodid_s {
91 u8 product_id[16];
92};
93
94struct scsi_inquiry_prodrev_s {
95 u8 product_rev[4];
96};
97
98struct scsi_inquiry_data_s {
99#ifdef __BIG_ENDIAN
100 u8 peripheral_qual:3; /* peripheral qualifier */
101 u8 device_type:5; /* peripheral device type */
102 u8 rmb:1; /* removable medium bit */
103 u8 device_type_mod:7; /* device type modifier */
104 u8 version;
105 u8 aenc:1; /* async evt notification capability */
106 u8 trm_iop:1; /* terminate I/O process */
107 u8 norm_aca:1; /* normal ACA supported */
108 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
109 u8 rsp_data_format:4;
110 u8 additional_len;
111 u8 sccs:1;
112 u8 reserved1:7;
113 u8 reserved2:1;
114 u8 enc_serv:1; /* enclosure service component */
115 u8 reserved3:1;
116 u8 multi_port:1; /* multi-port device */
117 u8 m_chngr:1; /* device in medium transport element */
118 u8 ack_req_q:1; /* SIP specific bit */
119 u8 addr32:1; /* SIP specific bit */
120 u8 addr16:1; /* SIP specific bit */
121 u8 rel_adr:1; /* relative address */
122 u8 w_bus32:1;
123 u8 w_bus16:1;
124 u8 synchronous:1;
125 u8 linked_commands:1;
126 u8 trans_dis:1;
127 u8 cmd_queue:1; /* command queueing supported */
128 u8 soft_reset:1; /* soft reset alternative (VS) */
129#else
130 u8 device_type:5; /* peripheral device type */
131 u8 peripheral_qual:3; /* peripheral qualifier */
132 u8 device_type_mod:7; /* device type modifier */
133 u8 rmb:1; /* removable medium bit */
134 u8 version;
135 u8 rsp_data_format:4;
136 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
137 u8 norm_aca:1; /* normal ACA supported */
138 u8 terminate_iop:1;/* terminate I/O process */
139 u8 aenc:1; /* async evt notification capability */
140 u8 additional_len;
141 u8 reserved1:7;
142 u8 sccs:1;
143 u8 addr16:1; /* SIP specific bit */
144 u8 addr32:1; /* SIP specific bit */
145 u8 ack_req_q:1; /* SIP specific bit */
146 u8 m_chngr:1; /* device in medium transport element */
147 u8 multi_port:1; /* multi-port device */
148 u8 reserved3:1; /* TBD - Vendor Specific */
149 u8 enc_serv:1; /* enclosure service component */
150 u8 reserved2:1;
151 u8 soft_seset:1; /* soft reset alternative (VS) */
152 u8 cmd_queue:1; /* command queueing supported */
153 u8 trans_dis:1;
154 u8 linked_commands:1;
155 u8 synchronous:1;
156 u8 w_bus16:1;
157 u8 w_bus32:1;
158 u8 rel_adr:1; /* relative address */
159#endif
160 struct scsi_inquiry_vendor_s vendor_id;
161 struct scsi_inquiry_prodid_s product_id;
162 struct scsi_inquiry_prodrev_s product_rev;
163 u8 vendor_specific[20];
164 u8 reserved4[40];
165};
166
167/*
168 * SCSI sense data format
169 */
170struct scsi_sense_s {
171#ifdef __BIG_ENDIAN
172 u8 valid:1;
173 u8 rsp_code:7;
174#else
175 u8 rsp_code:7;
176 u8 valid:1;
177#endif
178 u8 seg_num;
179#ifdef __BIG_ENDIAN
180 u8 file_mark:1;
181 u8 eom:1; /* end of media */
182 u8 ili:1; /* incorrect length indicator */
183 u8 reserved:1;
184 u8 sense_key:4;
185#else
186 u8 sense_key:4;
187 u8 reserved:1;
188 u8 ili:1; /* incorrect length indicator */
189 u8 eom:1; /* end of media */
190 u8 file_mark:1;
191#endif
192 u8 information[4]; /* device-type or cmd specific info */
193 u8 add_sense_length; /* additional sense length */
194 u8 command_info[4];/* command specific information */
195 u8 asc; /* additional sense code */
196 u8 ascq; /* additional sense code qualifier */
197 u8 fru_code; /* field replaceable unit code */
198#ifdef __BIG_ENDIAN
199 u8 sksv:1; /* sense key specific valid */
200 u8 c_d:1; /* command/data bit */
201 u8 res1:2;
202 u8 bpv:1; /* bit pointer valid */
203 u8 bpointer:3; /* bit pointer */
204#else
205 u8 bpointer:3; /* bit pointer */
206 u8 bpv:1; /* bit pointer valid */
207 u8 res1:2;
208 u8 c_d:1; /* command/data bit */
209 u8 sksv:1; /* sense key specific valid */
210#endif
211 u8 fpointer[2]; /* field pointer */
212};
213
214/* 59/*
215 * Fibre Channel Header Structure (FCHS) definition 60 * Fibre Channel Header Structure (FCHS) definition
216 */ 61 */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index e07bd4745d8b..f0f80e282e39 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
24 * BFA ITNIM Related definitions 24 * BFA ITNIM Related definitions
25 */ 25 */
26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); 26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
27static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
29static void bfa_ioim_lm_init(struct bfa_s *bfa); 27static void bfa_ioim_lm_init(struct bfa_s *bfa);
30 28
31#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ 29#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
60 } \ 58 } \
61} while (0) 59} while (0)
62 60
63#define bfa_ioim_rp_wwn(__ioim) \
64 (((struct bfa_fcs_rport_s *) \
65 (__ioim)->itnim->rport->rport_drv)->pwwn)
66
67#define bfa_ioim_lp_wwn(__ioim) \
68 ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
69 (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
70
71#define bfa_itnim_sler_cb(__itnim) do { \ 61#define bfa_itnim_sler_cb(__itnim) do { \
72 if ((__itnim)->bfa->fcs) \ 62 if ((__itnim)->bfa->fcs) \
73 bfa_cb_itnim_sler((__itnim)->ditn); \ 63 bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
77 } \ 67 } \
78} while (0) 68} while (0)
79 69
80enum bfa_ioim_lm_status {
81 BFA_IOIM_LM_PRESENT = 1,
82 BFA_IOIM_LM_LUN_NOT_SUP = 2,
83 BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
84 BFA_IOIM_LM_LUN_NOT_RDY = 4,
85};
86
87enum bfa_ioim_lm_ua_status { 70enum bfa_ioim_lm_ua_status {
88 BFA_IOIM_LM_UA_RESET = 0, 71 BFA_IOIM_LM_UA_RESET = 0,
89 BFA_IOIM_LM_UA_SET = 1, 72 BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
145 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ 128 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
146 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ 129 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
147 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ 130 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
148 BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
149 BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
150 BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
151}; 131};
152 132
153 133
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
245static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); 225static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
246static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); 226static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
247static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 227static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
248static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
249static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
250static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
251 228
252/* 229/*
253 * forward declaration of BFA IO state machine 230 * forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
445 bfa_fcpim_add_iostats(lstats, rstats, output_reqs); 422 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
446 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); 423 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
447 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); 424 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
448 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
449 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
450 bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
451 bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
452 bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
453 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
454} 425}
455 426
456bfa_status_t 427bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1580 __bfa_cb_ioim_abort, ioim); 1551 __bfa_cb_ioim_abort, ioim);
1581 break; 1552 break;
1582 1553
1583 case BFA_IOIM_SM_LM_LUN_NOT_SUP:
1584 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1585 bfa_ioim_move_to_comp_q(ioim);
1586 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1587 __bfa_cb_ioim_lm_lun_not_sup, ioim);
1588 break;
1589
1590 case BFA_IOIM_SM_LM_RPL_DC:
1591 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1592 bfa_ioim_move_to_comp_q(ioim);
1593 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1594 __bfa_cb_ioim_lm_rpl_dc, ioim);
1595 break;
1596
1597 case BFA_IOIM_SM_LM_LUN_NOT_RDY:
1598 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1599 bfa_ioim_move_to_comp_q(ioim);
1600 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1601 __bfa_cb_ioim_lm_lun_not_rdy, ioim);
1602 break;
1603
1604 default: 1554 default:
1605 bfa_sm_fault(ioim->bfa, event); 1555 bfa_sm_fault(ioim->bfa, event);
1606 } 1556 }
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
2160 } 2110 }
2161} 2111}
2162 2112
2163/*
2164 * Validate LUN for LUN masking
2165 */
2166static enum bfa_ioim_lm_status
2167bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
2168 struct bfa_rport_s *rp, struct scsi_lun lun)
2169{
2170 u8 i;
2171 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2173 struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
2174
2175 if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
2176 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2177 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2178 return BFA_IOIM_LM_PRESENT;
2179 }
2180
2181 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2182
2183 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2184 continue;
2185
2186 if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
2187 scsilun_to_int((struct scsi_lun *)&lun))
2188 && (rp->rport_tag == lun_list[i].rp_tag)
2189 && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
2190 lun_list[i].lp_tag)) {
2191 bfa_trc(ioim->bfa, lun_list[i].rp_tag);
2192 bfa_trc(ioim->bfa, lun_list[i].lp_tag);
2193 bfa_trc(ioim->bfa, scsilun_to_int(
2194 (struct scsi_lun *)&lun_list[i].lun));
2195
2196 if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
2197 ((cdb->scsi_cdb[0] != INQUIRY) ||
2198 (cdb->scsi_cdb[0] != REPORT_LUNS))) {
2199 lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
2200 return BFA_IOIM_LM_RPL_DATA_CHANGED;
2201 }
2202
2203 if (cdb->scsi_cdb[0] == REPORT_LUNS)
2204 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2205
2206 return BFA_IOIM_LM_PRESENT;
2207 }
2208 }
2209
2210 if ((cdb->scsi_cdb[0] == INQUIRY) &&
2211 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2212 ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
2213 return BFA_IOIM_LM_PRESENT;
2214 }
2215
2216 if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
2217 return BFA_IOIM_LM_LUN_NOT_RDY;
2218
2219 return BFA_IOIM_LM_LUN_NOT_SUP;
2220}
2221
2222static bfa_boolean_t
2223bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
2224{
2225 return BFA_TRUE;
2226}
2227
2228static void
2229bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
2230 int buf_lun_cnt)
2231{
2232 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2233 struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
2234 struct scsi_lun lun;
2235 int i, j;
2236
2237 bfa_trc(ioim->bfa, buf_lun_cnt);
2238 for (j = 0; j < buf_lun_cnt; j++) {
2239 lun = *((struct scsi_lun *)(lun_data + j));
2240 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2241 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2242 continue;
2243 if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
2244 (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
2245 (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
2246 == scsilun_to_int((struct scsi_lun *)&lun))) {
2247 lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
2248 break;
2249 }
2250 } /* next lun in mask DB */
2251 } /* next lun in buf */
2252}
2253
2254static int
2255bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
2256 struct scsi_report_luns_data_s *rl)
2257{
2258 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2259 struct scatterlist *sg = scsi_sglist(cmnd);
2260 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2261 struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
2262 int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
2263 int lun_across_sg_bytes, bytes_from_next_buf;
2264 u64 last_lun, temp_last_lun;
2265
2266 /* fetch luns from the first sg element */
2267 bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
2268 (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
2269
2270 /* fetch luns from multiple sg elements */
2271 scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
2272 if (sgeid == 0) {
2273 prev_sg_len = sg_dma_len(sg);
2274 prev_rl_data = (struct scsi_lun *)
2275 phys_to_virt(sg_dma_address(sg));
2276 continue;
2277 }
2278
2279 /* if the buf is having more data */
2280 lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
2281 if (lun_across_sg_bytes) {
2282 bfa_trc(ioim->bfa, lun_across_sg_bytes);
2283 bfa_stats(ioim->itnim, lm_lun_across_sg);
2284 bytes_from_next_buf = sizeof(struct scsi_lun) -
2285 lun_across_sg_bytes;
2286
2287 /* from next buf take higher bytes */
2288 temp_last_lun = *((u64 *)
2289 phys_to_virt(sg_dma_address(sg)));
2290 last_lun |= temp_last_lun >>
2291 (lun_across_sg_bytes * BITS_PER_BYTE);
2292
2293 /* from prev buf take higher bytes */
2294 temp_last_lun = *((u64 *)(prev_rl_data +
2295 (prev_sg_len - lun_across_sg_bytes)));
2296 temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
2297 last_lun = last_lun | (temp_last_lun <<
2298 (bytes_from_next_buf * BITS_PER_BYTE));
2299
2300 bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
2301 } else
2302 bytes_from_next_buf = 0;
2303
2304 *pgdlen += sg_dma_len(sg);
2305 prev_sg_len = sg_dma_len(sg);
2306 prev_rl_data = (struct scsi_lun *)
2307 phys_to_virt(sg_dma_address(sg));
2308 bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
2309 bytes_from_next_buf,
2310 sg_dma_len(sg) / sizeof(struct scsi_lun));
2311 }
2312
2313 /* update the report luns data - based on fetched luns */
2314 sg = scsi_sglist(cmnd);
2315 base_rl_data = (struct scsi_lun *)rl->lun;
2316 base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
2317 for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
2318 if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
2319 base_rl_data[j] = lun_list[i].lun;
2320 lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
2321 j++;
2322 lun_fetched_cnt++;
2323 }
2324
2325 if (j > base_count) {
2326 j = 0;
2327 sg = sg_next(sg);
2328 base_rl_data = (struct scsi_lun *)
2329 phys_to_virt(sg_dma_address(sg));
2330 base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
2331 }
2332 }
2333
2334 bfa_trc(ioim->bfa, lun_fetched_cnt);
2335 return lun_fetched_cnt;
2336}
2337
2338static bfa_boolean_t
2339bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
2340{
2341 struct scsi_inquiry_data_s *inq;
2342 struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
2343
2344 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2345 inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
2346
2347 bfa_trc(ioim->bfa, inq->device_type);
2348 inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
2349 return 0;
2350}
2351
2352static bfa_boolean_t
2353bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
2354{
2355 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2356 struct scatterlist *sg = scsi_sglist(cmnd);
2357 struct bfi_ioim_rsp_s *m;
2358 struct scsi_report_luns_data_s *rl = NULL;
2359 int lun_count = 0, lun_fetched_cnt = 0;
2360 u32 residue, pgdlen = 0;
2361
2362 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2363 if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
2364 return BFA_TRUE;
2365
2366 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2367 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
2368 return BFA_TRUE;
2369
2370 pgdlen = sg_dma_len(sg);
2371 bfa_trc(ioim->bfa, pgdlen);
2372 rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
2373 lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
2374 lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
2375
2376 if (lun_count == lun_fetched_cnt)
2377 return BFA_TRUE;
2378
2379 bfa_trc(ioim->bfa, lun_count);
2380 bfa_trc(ioim->bfa, lun_fetched_cnt);
2381 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2382
2383 if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
2384 rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
2385 sizeof(struct scsi_lun);
2386 else
2387 bfa_stats(ioim->itnim, lm_small_buf_addresidue);
2388
2389 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2390 bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
2391
2392 residue = be32_to_cpu(m->residue);
2393 residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
2394 bfa_stats(ioim->itnim, lm_wire_residue_changed);
2395 m->residue = be32_to_cpu(residue);
2396 bfa_trc(ioim->bfa, ioim->nsges);
2397 return BFA_FALSE;
2398}
2399
2400static void 2113static void
2401__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) 2114__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2402{ 2115{
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2454 m->scsi_status, sns_len, snsinfo, residue); 2167 m->scsi_status, sns_len, snsinfo, residue);
2455} 2168}
2456 2169
2457static void
2458__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
2459{
2460 struct bfa_ioim_s *ioim = cbarg;
2461 int sns_len = 0xD;
2462 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2463 struct scsi_sense_s *snsinfo;
2464
2465 if (!complete) {
2466 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2467 return;
2468 }
2469
2470 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2471 ioim->fcpim->fcp, ioim->iotag);
2472 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2473 snsinfo->add_sense_length = 0xa;
2474 snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
2475 snsinfo->sense_key = ILLEGAL_REQUEST;
2476 bfa_trc(ioim->bfa, residue);
2477 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2478 SCSI_STATUS_CHECK_CONDITION, sns_len,
2479 (u8 *)snsinfo, residue);
2480}
2481
2482static void
2483__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
2484{
2485 struct bfa_ioim_s *ioim = cbarg;
2486 int sns_len = 0xD;
2487 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2488 struct scsi_sense_s *snsinfo;
2489
2490 if (!complete) {
2491 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2492 return;
2493 }
2494
2495 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2496 ioim->iotag);
2497 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2498 snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
2499 snsinfo->asc = SCSI_ASC_TOCC;
2500 snsinfo->add_sense_length = 0x6;
2501 snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
2502 bfa_trc(ioim->bfa, residue);
2503 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2504 SCSI_STATUS_CHECK_CONDITION, sns_len,
2505 (u8 *)snsinfo, residue);
2506}
2507
2508static void
2509__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
2510{
2511 struct bfa_ioim_s *ioim = cbarg;
2512 int sns_len = 0xD;
2513 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2514 struct scsi_sense_s *snsinfo;
2515
2516 if (!complete) {
2517 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2518 return;
2519 }
2520
2521 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2522 ioim->fcpim->fcp, ioim->iotag);
2523 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2524 snsinfo->add_sense_length = 0xa;
2525 snsinfo->sense_key = NOT_READY;
2526 snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
2527 snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
2528 bfa_trc(ioim->bfa, residue);
2529 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2530 SCSI_STATUS_CHECK_CONDITION, sns_len,
2531 (u8 *)snsinfo, residue);
2532}
2533
2534void 2170void
2535bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn, 2171bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2536 u16 rp_tag, u8 lp_tag) 2172 u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2647 if (port) { 2283 if (port) {
2648 *pwwn = port->port_cfg.pwwn; 2284 *pwwn = port->port_cfg.pwwn;
2649 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); 2285 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2650 rp = rp_fcs->bfa_rport; 2286 if (rp_fcs)
2287 rp = rp_fcs->bfa_rport;
2651 } 2288 }
2652 2289
2653 lunm_list = bfa_get_lun_mask_list(bfa); 2290 lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2715 if (port) { 2352 if (port) {
2716 *pwwn = port->port_cfg.pwwn; 2353 *pwwn = port->port_cfg.pwwn;
2717 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); 2354 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2718 rp = rp_fcs->bfa_rport; 2355 if (rp_fcs)
2356 rp = rp_fcs->bfa_rport;
2719 } 2357 }
2720 } 2358 }
2721 2359
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2757 return; 2395 return;
2758 } 2396 }
2759 2397
2760 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2761 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 2398 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2762 0, 0, NULL, 0); 2399 0, 0, NULL, 0);
2763} 2400}
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
                 return;
         }

-        ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
         bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
                         0, 0, NULL, 0);
 }
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
                 return;
         }

-        ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
         bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
 }

@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
                 ioim->bfa = fcpim->bfa;
                 ioim->fcpim = fcpim;
                 ioim->iosp = iosp;
-                ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                 INIT_LIST_HEAD(&ioim->sgpg_q);
                 bfa_reqq_winit(&ioim->iosp->reqq_wait,
                                 bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                         evt = BFA_IOIM_SM_DONE;
                 else
                         evt = BFA_IOIM_SM_COMP;
-                ioim->proc_rsp_data(ioim);
                 break;

         case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                 if (rsp->abort_tag != ioim->abort_tag) {
                         bfa_trc(ioim->bfa, rsp->abort_tag);
                         bfa_trc(ioim->bfa, ioim->abort_tag);
-                        ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                         return;
                 }

@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                 WARN_ON(1);
         }

-        ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
         bfa_sm_send_event(ioim, evt);
 }

@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)

         bfa_ioim_cb_profile_comp(fcpim, ioim);

-        if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
-                bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-                return;
-        }
-
-        if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
-                bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-        else
-                bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+        bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
 }

 /*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
-        struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-        struct bfa_lps_s *lps;
-        enum bfa_ioim_lm_status status;
-        struct scsi_lun scsilun;
-
-        if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
-                lps = BFA_IOIM_TO_LPS(ioim);
-                int_to_scsilun(cmnd->device->lun, &scsilun);
-                status = bfa_ioim_lm_check(ioim, lps,
-                                ioim->itnim->rport, scsilun);
-                if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
-                        bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
-                        bfa_stats(ioim->itnim, lm_lun_not_rdy);
-                        return;
-                }
-
-                if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
-                        bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
-                        bfa_stats(ioim->itnim, lm_lun_not_sup);
-                        return;
-                }
-
-                if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
-                        bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
-                        bfa_stats(ioim->itnim, lm_rpl_data_changed);
-                        return;
-                }
-        }
-
         bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

         /*
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb81cb7..36f26da80f76 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
 struct bfad_tskim_s;

 typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);

 struct bfa_fcpim_s {
         struct bfa_s *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
         u32 path_tov;
         u16 q_depth;
         u8 reqq;                         /* Request queue to be used */
-        u8 lun_masking_pending;
         struct list_head itnim_q;        /* queue of active itnim    */
         struct list_head ioim_resfree_q; /* IOs waiting for f/w      */
         struct list_head ioim_comp_q;    /* IO global comp Q         */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
         u8 reqq;                /* Request queue for I/O     */
         u8 mode;                /* IO is passthrough or not  */
         u64 start_time;         /* IO's Profile start val    */
-        bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };

 struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
         (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;        \
 } while (0)

-#define BFA_IOIM_TO_LPS(__ioim) \
-        BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
-                __ioim->itnim->rport->rport_info.lp_tag)
-
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 95adb86d3769..b52cbb6bcd5a 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
 #define BFA_LP_TAG_INVALID        0xff
 void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
-                wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);

 /*
  * bfa fcxp API functions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 66fb72531b34..404fd10ddb21 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,

         spin_lock_irqsave(&bfad->bfad_lock, flags);
         bfa_fcs_vport_start(&vport->fcs_vport);
+        list_add_tail(&vport->list_entry, &bfad->vport_list);
         spin_unlock_irqrestore(&bfad->bfad_lock, flags);

         return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
         bfad->ref_count = 0;
         bfad->pport.bfad = bfad;
         INIT_LIST_HEAD(&bfad->pbc_vport_list);
+        INIT_LIST_HEAD(&bfad->vport_list);

         /* Setup the debugfs node for this bfad */
         if (bfa_debugfs_enable)
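The two bfad.c hunks above give each HBA instance a vport_list that is initialized once at PCI probe time and then appended to under bfad_lock whenever a vport is created; the matching list_del() on vport delete (bfad_attr.c below) keeps the list consistent so bfad_bsg.c can walk it safely. A reduced sketch of that bookkeeping with the stock list helpers (the demo_* names are stand-ins, not bfad structures):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_hba {
        spinlock_t lock;
        struct list_head vport_list;
};

struct demo_vport {
        struct list_head list_entry;
};

static void demo_hba_init(struct demo_hba *hba)
{
        spin_lock_init(&hba->lock);
        INIT_LIST_HEAD(&hba->vport_list);       /* must precede any list_add */
}

static void demo_vport_created(struct demo_hba *hba, struct demo_vport *vp)
{
        unsigned long flags;

        spin_lock_irqsave(&hba->lock, flags);
        list_add_tail(&vp->list_entry, &hba->vport_list);
        spin_unlock_irqrestore(&hba->lock, flags);
}
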
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9d95844ab463..1938fe0473e9 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)

 free_scsi_host:
         bfad_scsi_host_free(bfad, im_port);
-
+        list_del(&vport->list_entry);
         kfree(vport);

         return 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 06fc00caeb41..530de2b1200a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@ out:
         return 0;
 }

+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+        struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+        struct bfad_vport_s *vport = NULL;
+
+        /* Set the scsi device LUN SCAN flags for base port */
+        bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+        /* Set the scsi device LUN SCAN flags for the vports */
+        list_for_each_entry(vport, &bfad->vport_list, list_entry)
+                bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
 int
 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 {
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
         unsigned long flags;

         spin_lock_irqsave(&bfad->bfad_lock, flags);
-        if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+        if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
                 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
-        else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+                /* Set the LUN Scanning mode to be Sequential scan */
+                if (iocmd->status == BFA_STATUS_OK)
+                        bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+        } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
                 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
-        else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+                /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+                if (iocmd->status == BFA_STATUS_OK)
+                        bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+        } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
                 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
         spin_unlock_irqrestore(&bfad->bfad_lock, flags);
         return 0;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f820ec..dc5b9d99c450 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>

 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
         struct list_head        active_aen_q;
         struct bfa_aen_entry_s  aen_list[BFA_AEN_MAX_ENTRY];
         spinlock_t              bfad_aen_spinlock;
+        struct list_head        vport_list;
 };

 /* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index e5db649e8eb7..3153923f5b60 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -918,16 +918,70 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
 }

 /*
+ * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Has the logic to query the LUN Mask database to check if this LUN needs to
+ * be made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+                                  struct fc_rport *rport)
+{
+        struct bfad_itnim_data_s *itnim_data =
+                        (struct bfad_itnim_data_s *) rport->dd_data;
+        struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+        struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+        struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+        int i = 0, ret = -ENXIO;
+
+        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+                if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+                    scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+                    lun_list[i].rp_tag == bfa_rport->rport_tag &&
+                    lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+                        ret = BFA_STATUS_OK;
+                        break;
+                }
+        }
+        return ret;
+}
+
+/*
  * Scsi_Host template entry slave_alloc
  */
 static int
 bfad_im_slave_alloc(struct scsi_device *sdev)
 {
         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+        struct bfad_itnim_data_s *itnim_data =
+                        (struct bfad_itnim_data_s *) rport->dd_data;
+        struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;

         if (!rport || fc_remote_port_chkready(rport))
                 return -ENXIO;

+        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+                /*
+                 * We should not mask LUN 0 - since this will translate
+                 * to no LUN / TARGET for the SCSI ml, resulting in no scan.
+                 */
+                if (sdev->lun == 0) {
+                        sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+                                             BLIST_SPARSELUN;
+                        goto done;
+                }
+
+                /*
+                 * Query LUN Mask configuration - to expose this LUN
+                 * to the SCSI mid-layer or to mask it.
+                 */
+                if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+                                BFA_STATUS_OK)
+                        return -ENXIO;
+        }
+done:
         sdev->hostdata = rport->dd_data;

         return 0;
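The new slave_alloc path above hides masked LUNs from the SCSI mid-layer by returning -ENXIO, while always letting LUN 0 through (flagged BLIST_NOREPORTLUN | BLIST_SPARSELUN) so the target still gets a sequential scan. A condensed sketch of that decision; demo_lun_is_visible() is a hypothetical stand-in for the LUN-mask table walk, not a bfa API:

#include <linux/errno.h>
#include <linux/types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>

static bool demo_lun_is_visible(struct scsi_device *sdev)
{
        /* Stand-in for the bfad_im_check_if_make_lun_visible() table lookup. */
        return sdev->lun < 8;
}

static int demo_slave_alloc(struct scsi_device *sdev, bool masking_enabled)
{
        if (!masking_enabled)
                return 0;                       /* expose every LUN */

        if (sdev->lun == 0) {
                /* LUN 0 must stay visible or the target is never scanned. */
                sdev->sdev_bflags |= BLIST_NOREPORTLUN | BLIST_SPARSELUN;
                return 0;
        }

        return demo_lun_is_visible(sdev) ? 0 : -ENXIO;
}
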
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
             && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
                 itnim->scsi_tgt_id = fc_rport->scsi_target_id;

+        itnim->channel = fc_rport->channel;
+
         return;
 }

diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 004b6cf848d9..0814367ef101 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
         struct fc_rport *fc_rport;
         struct bfa_itnim_s *bfa_itnim;
         u16 scsi_tgt_id;
+        u16 channel;
         u16 queue_work;
         unsigned long last_ramp_up_time;
         unsigned long last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
 int bfad_im_bsg_request(struct fc_bsg_job *job);
 int bfad_im_bsg_timeout(struct fc_bsg_job *job);

+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose the LUN Scanning mode: REPORT_LUNS vs. Sequential Scan.
+ *
+ * Internally iterates over all the ITNIMs that are part of the im_port and
+ * sets the sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do {              \
+        struct scsi_device *__sdev = NULL;                                 \
+        struct bfad_itnim_s *__itnim = NULL;                               \
+        u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;              \
+        list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list),    \
+                            list_entry) {                                  \
+                __sdev = scsi_device_lookup((__im_port)->shost,            \
+                                            __itnim->channel,              \
+                                            __itnim->scsi_tgt_id, 0);      \
+                if (__sdev) {                                              \
+                        if ((__lunmask_cfg) == BFA_TRUE)                   \
+                                __sdev->sdev_bflags |= scan_flags;         \
+                        else                                               \
+                                __sdev->sdev_bflags &= ~scan_flags;        \
+                        scsi_device_put(__sdev);                           \
+                }                                                          \
+        }                                                                  \
+} while (0)
+
 #endif
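The bfad_reset_sdev_bflags() macro added above walks every mapped ITNIM, looks up the scsi_device at LUN 0 for that target, and sets or clears BLIST_NOREPORTLUN | BLIST_SPARSELUN, which is what switches the mid-layer between REPORT LUNS and sparse sequential scanning. A function-style sketch of the same walk over a reduced target list (the demo_* types are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>

struct demo_target {
        struct list_head list_entry;
        unsigned int channel;
        unsigned int target_id;
};

static void demo_set_scan_mode(struct Scsi_Host *shost,
                               struct list_head *targets, bool sparse)
{
        unsigned int flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
        struct demo_target *t;
        struct scsi_device *sdev;

        list_for_each_entry(t, targets, list_entry) {
                sdev = scsi_device_lookup(shost, t->channel, t->target_id, 0);
                if (!sdev)
                        continue;
                if (sparse)
                        sdev->sdev_bflags |= flags;
                else
                        sdev->sdev_bflags &= ~flags;
                scsi_device_put(sdev);          /* drop the lookup reference */
        }
}
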
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c5360ffb4bed..d3ff9cd40234 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)

         tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
         if (!tdata->skb) {
-                pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
-                        cdev->skb_tx_rsvd, headroom, opcode);
+                struct cxgbi_sock *csk = cconn->cep->csk;
+                struct net_device *ndev = cdev->ports[csk->port_id];
+                ndev->stats.tx_dropped++;
                 return -ENOMEM;
         }

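The libcxgbi hunk above replaces an unbounded warning on PDU skb allocation failure with a bump of the owning net_device's tx_dropped counter. The same pattern in isolation (assuming the caller has already resolved the correct net_device):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_alloc_pdu(struct net_device *ndev, unsigned int len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                ndev->stats.tx_dropped++;       /* shows up in ip -s link */
        return skb;
}
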
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef021291a4d..04c5cea47a22 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
                          * Power On, Reset, or Bus Device Reset, just retry.
                          */
                         return ADD_TO_MLQUEUE;
+                if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+                        /*
+                         * Mode Parameters Changed
+                         */
+                        return ADD_TO_MLQUEUE;
                 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
                         /*
                          * ALUA state changed
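The added ALUA check treats ASC/ASCQ 0x2a/0x01 ("Mode parameters changed") like the neighbouring unit-attention cases and requeues the command instead of failing it. The general shape of such a sense filter, reduced to a boolean over the decoded sense header (a sketch, not the handler's actual return-code plumbing):

#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>

/* True when the unit attention only reports a benign state change. */
static bool demo_sense_is_retryable(const struct scsi_sense_hdr *h)
{
        if (h->sense_key != UNIT_ATTENTION)
                return false;
        if (h->asc == 0x29)                             /* power on, reset, ...    */
                return true;
        if (h->asc == 0x2a && (h->ascq == 0x01 ||       /* mode parameters changed */
                               h->ascq == 0x06))        /* ALUA state changed      */
                return true;
        return false;
}
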
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 841ebf4a6788..53a31c753cb1 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -953,6 +953,8 @@ static int __init rdac_init(void)
         if (!kmpath_rdacd) {
                 scsi_unregister_device_handler(&rdac_dh);
                 printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+                r = -EINVAL;
         }
 done:
         return r;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 8d67467dd9ce..e9599600aa23 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
                  "Direct Data Placement (DDP).");

-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);

 static struct workqueue_struct *fcoe_wq;

@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
67 71
68/* fcoe host list */ 72/* fcoe host list */
69/* must only by accessed under the RTNL mutex */ 73/* must only by accessed under the RTNL mutex */
70LIST_HEAD(fcoe_hostlist); 74static LIST_HEAD(fcoe_hostlist);
71DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 75static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
72 76
73/* Function Prototypes */ 77/* Function Prototypes */
74static int fcoe_reset(struct Scsi_Host *); 78static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
157 .lport_set_port_id = fcoe_set_port_id, 161 .lport_set_port_id = fcoe_set_port_id,
158}; 162};
159 163
160struct fc_function_template fcoe_nport_fc_functions = { 164static struct fc_function_template fcoe_nport_fc_functions = {
161 .show_host_node_name = 1, 165 .show_host_node_name = 1,
162 .show_host_port_name = 1, 166 .show_host_port_name = 1,
163 .show_host_supported_classes = 1, 167 .show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
197 .bsg_request = fc_lport_bsg_request, 201 .bsg_request = fc_lport_bsg_request,
198}; 202};
199 203
200struct fc_function_template fcoe_vport_fc_functions = { 204static struct fc_function_template fcoe_vport_fc_functions = {
201 .show_host_node_name = 1, 205 .show_host_node_name = 1,
202 .show_host_port_name = 1, 206 .show_host_port_name = 1,
203 .show_host_supported_classes = 1, 207 .show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
433 * 437 *
434 * Caller must be holding the RTNL mutex 438 * Caller must be holding the RTNL mutex
435 */ 439 */
436void fcoe_interface_cleanup(struct fcoe_interface *fcoe) 440static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
437{ 441{
438 struct net_device *netdev = fcoe->netdev; 442 struct net_device *netdev = fcoe->netdev;
439 struct fcoe_ctlr *fip = &fcoe->ctlr; 443 struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
748 * 752 *
749 * Returns: True for read types I/O, otherwise returns false. 753 * Returns: True for read types I/O, otherwise returns false.
750 */ 754 */
751bool fcoe_oem_match(struct fc_frame *fp) 755static bool fcoe_oem_match(struct fc_frame *fp)
752{ 756{
753 struct fc_frame_header *fh = fc_frame_header_get(fp); 757 struct fc_frame_header *fh = fc_frame_header_get(fp);
754 struct fcp_cmnd *fcp; 758 struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
         if (fc_fcp_is_read(fr_fsp(fp)) &&
             (fr_fsp(fp)->data_len > fcoe_ddp_min))
                 return true;
-        else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+        else if ((fr_fsp(fp) == NULL) &&
+                 (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+                 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
                 fcp = fc_frame_payload_get(fp, sizeof(*fcp));
-                if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
-                    fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
-                    (fcp->fc_flags & FCP_CFL_WRDATA))
+                if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+                    (ntohl(fcp->fc_dl) > fcoe_ddp_min))
                         return true;
         }
         return false;
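After the rewrite above, fcoe_oem_match() only claims a frame for the offload exchange manager when it is either a sufficiently large read already bound to an fsp, or an unsolicited FCP write command (no fsp yet, unsolicited-command R_CTL, unknown RX_ID) whose burst length clears the ddp_min threshold. The write-side test in isolation (a sketch over struct fcp_cmnd; the threshold is a caller-supplied parameter here):

#include <asm/byteorder.h>
#include <linux/types.h>
#include <scsi/fc/fc_fcp.h>

/* Is this unsolicited FCP_CMND a write large enough to be worth DDP setup? */
static bool demo_ddp_worthwhile_write(const struct fcp_cmnd *fcp,
                                      unsigned int ddp_min)
{
        return (fcp->fc_flags & FCP_CFL_WRDATA) &&
               be32_to_cpu(fcp->fc_dl) > ddp_min;      /* fc_dl is big-endian */
}
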
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
1106 * 1111 *
1107 * Returns: 0 on success 1112 * Returns: 0 on success
1108 */ 1113 */
1109int __exit fcoe_if_exit(void) 1114static int __exit fcoe_if_exit(void)
1110{ 1115{
1111 fc_release_transport(fcoe_nport_scsi_transport); 1116 fc_release_transport(fcoe_nport_scsi_transport);
1112 fc_release_transport(fcoe_vport_scsi_transport); 1117 fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
1295 * 1300 *
1296 * Returns: 0 for success 1301 * Returns: 0 for success
1297 */ 1302 */
1298int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, 1303static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1299 struct packet_type *ptype, struct net_device *olddev) 1304 struct packet_type *ptype, struct net_device *olddev)
1300{ 1305{
1301 struct fc_lport *lport; 1306 struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
1451 * 1456 *
1452 * Return: 0 for success 1457 * Return: 0 for success
1453 */ 1458 */
1454int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) 1459static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1455{ 1460{
1456 int wlen; 1461 int wlen;
1457 u32 crc; 1462 u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1671 skb->dev ? skb->dev->name : "<NULL>"); 1676 skb->dev ? skb->dev->name : "<NULL>");
1672 1677
1673 port = lport_priv(lport); 1678 port = lport_priv(lport);
1674 if (skb_is_nonlinear(skb)) 1679 skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */
1675 skb_linearize(skb); /* not ideal */
1676 1680
1677 /* 1681 /*
1678 * Frame length checks and setting up the header pointers 1682 * Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ drop:
1728 * 1732 *
1729 * Return: 0 for success 1733 * Return: 0 for success
1730 */ 1734 */
1731int fcoe_percpu_receive_thread(void *arg) 1735static int fcoe_percpu_receive_thread(void *arg)
1732{ 1736{
1733 struct fcoe_percpu_s *p = arg; 1737 struct fcoe_percpu_s *p = arg;
1734 struct sk_buff *skb; 1738 struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ out_nortnl:
2146 * Returns: 0 if the ethtool query was successful 2150 * Returns: 0 if the ethtool query was successful
2147 * -1 if the ethtool query failed 2151 * -1 if the ethtool query failed
2148 */ 2152 */
2149int fcoe_link_speed_update(struct fc_lport *lport) 2153static int fcoe_link_speed_update(struct fc_lport *lport)
2150{ 2154{
2151 struct net_device *netdev = fcoe_netdev(lport); 2155 struct net_device *netdev = fcoe_netdev(lport);
2152 struct ethtool_cmd ecmd; 2156 struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
2180 * Returns: 0 if link is UP and OK, -1 if not 2184 * Returns: 0 if link is UP and OK, -1 if not
2181 * 2185 *
2182 */ 2186 */
2183int fcoe_link_ok(struct fc_lport *lport) 2187static int fcoe_link_ok(struct fc_lport *lport)
2184{ 2188{
2185 struct net_device *netdev = fcoe_netdev(lport); 2189 struct net_device *netdev = fcoe_netdev(lport);
2186 2190
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
2200 * there no packets that will be handled by the lport, but also that any 2204 * there no packets that will be handled by the lport, but also that any
2201 * threads already handling packet have returned. 2205 * threads already handling packet have returned.
2202 */ 2206 */
2203void fcoe_percpu_clean(struct fc_lport *lport) 2207static void fcoe_percpu_clean(struct fc_lport *lport)
2204{ 2208{
2205 struct fcoe_percpu_s *pp; 2209 struct fcoe_percpu_s *pp;
2206 struct fcoe_rcv_info *fr; 2210 struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
2251 * 2255 *
2252 * Returns: Always 0 (return value required by FC transport template) 2256 * Returns: Always 0 (return value required by FC transport template)
2253 */ 2257 */
2254int fcoe_reset(struct Scsi_Host *shost) 2258static int fcoe_reset(struct Scsi_Host *shost)
2255{ 2259{
2256 struct fc_lport *lport = shost_priv(shost); 2260 struct fc_lport *lport = shost_priv(shost);
2257 struct fcoe_port *port = lport_priv(lport); 2261 struct fcoe_port *port = lport_priv(lport);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6c6884bcf840..bcc89e639495 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
 #define FCOE_MIN_XID        0x0000  /* the min xid supported by fcoe_sw */
 #define FCOE_MAX_XID        0x0FFF  /* the max xid supported by fcoe_sw */

-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;

 #define FCOE_LOGGING        0x01 /* General logging, not categorized */
 #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5140f5d0fd6b..b96962c39449 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
         remove_ctlr_from_lockup_detector_list(h);
         /* If the list of ctlr's to monitor is empty, stop the thread */
         if (list_empty(&hpsa_ctlr_list)) {
+                spin_unlock_irqrestore(&lockup_detector_lock, flags);
                 kthread_stop(hpsa_lockup_detector);
+                spin_lock_irqsave(&lockup_detector_lock, flags);
                 hpsa_lockup_detector = NULL;
         }
         spin_unlock_irqrestore(&lockup_detector_lock, flags);
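The hpsa fix above exists because kthread_stop() blocks until the thread exits, so it may sleep and must not be called with a spinlock held; the lock is dropped around the call and retaken before the shared pointer is cleared. The pattern in isolation:

#include <linux/kthread.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static struct task_struct *demo_thread;         /* protected by demo_lock */

static void demo_stop_thread(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        if (demo_thread) {
                /* kthread_stop() sleeps; never call it under a spinlock. */
                spin_unlock_irqrestore(&demo_lock, flags);
                kthread_stop(demo_thread);
                spin_lock_irqsave(&demo_lock, flags);
                demo_thread = NULL;
        }
        spin_unlock_irqrestore(&demo_lock, flags);
}
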
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461cabc5..000000000000
--- a/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
1# Makefile for create_fw
2#
3CC=gcc
4CFLAGS=-c -Wall -O2 -g
5LDFLAGS=
6SOURCES=create_fw.c
7OBJECTS=$(SOURCES:.cpp=.o)
8EXECUTABLE=create_fw
9
10all: $(SOURCES) $(EXECUTABLE)
11
12$(EXECUTABLE): $(OBJECTS)
13 $(CC) $(LDFLAGS) $(OBJECTS) -o $@
14
15.c.o:
16 $(CC) $(CFLAGS) $< -O $@
17
18clean:
19 rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2bd233b..000000000000
--- a/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
1This defines the temporary binary blow we are to pass to the SCU
2driver to emulate the binary firmware that we will eventually be
3able to access via NVRAM on the SCU controller.
4
5The current size of the binary blob is expected to be 149 bytes or larger
6
7Header Types:
80x1: Phy Masks
90x2: Phy Gens
100x3: SAS Addrs
110xff: End of Data
12
13ID string - u8[12]: "#SCU MAGIC#\0"
14Version - u8: 1
15SubVersion - u8: 0
16
17Header Type - u8: 0x1
18Size - u8: 8
19Phy Mask - u32[8]
20
21Header Type - u8: 0x2
22Size - u8: 8
23Phy Gen - u32[8]
24
25Header Type - u8: 0x3
26Size - u8: 8
27Sas Addr - u64[8]
28
29Header Type - u8: 0xf
30
31
32==============================================================================
33
34Place isci_firmware.bin in /lib/firmware
35Be sure to recreate the initramfs image to include the firmware.
36
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887a7e95..000000000000
--- a/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <sys/types.h>
5#include <sys/stat.h>
6#include <fcntl.h>
7#include <string.h>
8#include <errno.h>
9#include <asm/types.h>
10#include <strings.h>
11#include <stdint.h>
12
13#include "create_fw.h"
14#include "../probe_roms.h"
15
16int write_blob(struct isci_orom *isci_orom)
17{
18 FILE *fd;
19 int err;
20 size_t count;
21
22 fd = fopen(blob_name, "w+");
23 if (!fd) {
24 perror("Open file for write failed");
25 fclose(fd);
26 return -EIO;
27 }
28
29 count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
30 if (count != 1) {
31 perror("Write data failed");
32 fclose(fd);
33 return -EIO;
34 }
35
36 fclose(fd);
37
38 return 0;
39}
40
41void set_binary_values(struct isci_orom *isci_orom)
42{
43 int ctrl_idx, phy_idx, port_idx;
44
45 /* setting OROM signature */
46 strncpy(isci_orom->hdr.signature, sig, strlen(sig));
47 isci_orom->hdr.version = version;
48 isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
49 isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
50 isci_orom->hdr.num_elements = num_elements;
51
52 for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
53 isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
54 isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
55 max_num_concurrent_dev_spin_up;
56 isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
57 enable_ssc;
58
59 for (port_idx = 0; port_idx < 4; port_idx++)
60 isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
61 phy_mask[ctrl_idx][port_idx];
62
63 for (phy_idx = 0; phy_idx < 4; phy_idx++) {
64 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
65 (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
66 isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
67 (__u32)(sas_addr[ctrl_idx][phy_idx]);
68
69 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
70 afe_tx_amp_control0;
71 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
72 afe_tx_amp_control1;
73 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
74 afe_tx_amp_control2;
75 isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
76 afe_tx_amp_control3;
77 }
78 }
79}
80
81int main(void)
82{
83 int err;
84 struct isci_orom *isci_orom;
85
86 isci_orom = malloc(sizeof(struct isci_orom));
87 memset(isci_orom, 0, sizeof(struct isci_orom));
88
89 set_binary_values(isci_orom);
90
91 err = write_blob(isci_orom);
92 if (err < 0) {
93 free(isci_orom);
94 return err;
95 }
96
97 free(isci_orom);
98 return 0;
99}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f298828d22e..000000000000
--- a/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
1#ifndef _CREATE_FW_H_
2#define _CREATE_FW_H_
3#include "../probe_roms.h"
4
5
6/* we are configuring for 2 SCUs */
7static const int num_elements = 2;
8
9/*
10 * For all defined arrays:
11 * elements 0-3 are for SCU0, ports 0-3
12 * elements 4-7 are for SCU1, ports 0-3
13 *
14 * valid configurations for one SCU are:
15 * P0 P1 P2 P3
16 * ----------------
17 * 0xF,0x0,0x0,0x0 # 1 x4 port
18 * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
19 * # ports
20 * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
21 * # port
22 * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
23 * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
24 *
25 * if there is a port/phy on which you do not wish to override the default
26 * values, use the value assigned to UNINIT_PARAM (255).
27 */
28
29/* discovery mode type (port auto config mode by default ) */
30
31/*
32 * if there is a port/phy on which you do not wish to override the default
33 * values, use the value "0000000000000000". SAS address of zero's is
34 * considered invalid and will not be used.
35 */
36#ifdef MPC
37static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
38static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
39 {1, 2, 4, 8} };
40static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
41 0x5FCFFFFFF0000002ULL,
42 0x5FCFFFFFF0000003ULL,
43 0x5FCFFFFFF0000004ULL },
44 { 0x5FCFFFFFF0000005ULL,
45 0x5FCFFFFFF0000006ULL,
46 0x5FCFFFFFF0000007ULL,
47 0x5FCFFFFFF0000008ULL } };
48#else /* APC (default) */
49static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
50static const __u8 phy_mask[2][4];
51static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
52 0x5FCFFFFF00000001ULL,
53 0x5FCFFFFF00000001ULL,
54 0x5FCFFFFF00000001ULL },
55 { 0x5FCFFFFF00000002ULL,
56 0x5FCFFFFF00000002ULL,
57 0x5FCFFFFF00000002ULL,
58 0x5FCFFFFF00000002ULL } };
59#endif
60
61/* Maximum number of concurrent device spin up */
62static const int max_num_concurrent_dev_spin_up = 1;
63
64/* enable of ssc operation */
65static const int enable_ssc;
66
67/* AFE_TX_AMP_CONTROL */
68static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
69static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
70static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
71static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
72
73static const char blob_name[] = "isci_firmware.bin";
74static const char sig[] = "ISCUOEMB";
75static const unsigned char version = 0x10;
76
77#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4c85b8..1a65d6514237 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
899 */ 899 */
900 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || 900 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
901 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || 901 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
902 (iphy->is_in_link_training == true && is_phy_starting(iphy))) { 902 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
903 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
903 is_controller_start_complete = false; 904 is_controller_start_complete = false;
904 break; 905 break;
905 } 906 }
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1666 /* Default to no SSC operation. */ 1667 /* Default to no SSC operation. */
1667 ihost->oem_parameters.controller.do_enable_ssc = false; 1668 ihost->oem_parameters.controller.do_enable_ssc = false;
1668 1669
1670 /* Default to short cables on all phys. */
1671 ihost->oem_parameters.controller.cable_selection_mask = 0;
1672
1669 /* Initialize all of the port parameter information to narrow ports. */ 1673 /* Initialize all of the port parameter information to narrow ports. */
1670 for (index = 0; index < SCI_MAX_PORTS; index++) { 1674 for (index = 0; index < SCI_MAX_PORTS; index++) {
1671 ihost->oem_parameters.ports[index].phy_mask = 0; 1675 ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1673 1677
1674 /* Initialize all of the phy parameter information. */ 1678 /* Initialize all of the phy parameter information. */
1675 for (index = 0; index < SCI_MAX_PHYS; index++) { 1679 for (index = 0; index < SCI_MAX_PHYS; index++) {
1676 /* Default to 6G (i.e. Gen 3) for now. */ 1680 /* Default to 3G (i.e. Gen 2). */
1677 ihost->user_parameters.phys[index].max_speed_generation = 3; 1681 ihost->user_parameters.phys[index].max_speed_generation =
1682 SCIC_SDS_PARM_GEN2_SPEED;
1678 1683
1679 /* the frequencies cannot be 0 */ 1684 /* the frequencies cannot be 0 */
1680 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; 1685 ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
1694 ihost->user_parameters.ssp_inactivity_timeout = 5; 1699 ihost->user_parameters.ssp_inactivity_timeout = 5;
1695 ihost->user_parameters.stp_max_occupancy_timeout = 5; 1700 ihost->user_parameters.stp_max_occupancy_timeout = 5;
1696 ihost->user_parameters.ssp_max_occupancy_timeout = 20; 1701 ihost->user_parameters.ssp_max_occupancy_timeout = 20;
1697 ihost->user_parameters.no_outbound_task_timeout = 20; 1702 ihost->user_parameters.no_outbound_task_timeout = 2;
1698} 1703}
1699 1704
1700static void controller_timeout(unsigned long data) 1705static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
1759 return sci_controller_reset(ihost); 1764 return sci_controller_reset(ihost);
1760} 1765}
1761 1766
1762int sci_oem_parameters_validate(struct sci_oem_params *oem) 1767int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1763{ 1768{
1764 int i; 1769 int i;
1765 1770
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
1791 oem->controller.max_concurr_spin_up < 1) 1796 oem->controller.max_concurr_spin_up < 1)
1792 return -EINVAL; 1797 return -EINVAL;
1793 1798
1799 if (oem->controller.do_enable_ssc) {
1800 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1801 return -EINVAL;
1802
1803 if (version >= ISCI_ROM_VER_1_1) {
1804 u8 test = oem->controller.ssc_sata_tx_spread_level;
1805
1806 switch (test) {
1807 case 0:
1808 case 2:
1809 case 3:
1810 case 6:
1811 case 7:
1812 break;
1813 default:
1814 return -EINVAL;
1815 }
1816
1817 test = oem->controller.ssc_sas_tx_spread_level;
1818 if (oem->controller.ssc_sas_tx_type == 0) {
1819 switch (test) {
1820 case 0:
1821 case 2:
1822 case 3:
1823 break;
1824 default:
1825 return -EINVAL;
1826 }
1827 } else if (oem->controller.ssc_sas_tx_type == 1) {
1828 switch (test) {
1829 case 0:
1830 case 3:
1831 case 6:
1832 break;
1833 default:
1834 return -EINVAL;
1835 }
1836 }
1837 }
1838 }
1839
1794 return 0; 1840 return 0;
1795} 1841}
1796 1842
1797static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) 1843static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
1798{ 1844{
1799 u32 state = ihost->sm.current_state_id; 1845 u32 state = ihost->sm.current_state_id;
1846 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
1800 1847
1801 if (state == SCIC_RESET || 1848 if (state == SCIC_RESET ||
1802 state == SCIC_INITIALIZING || 1849 state == SCIC_INITIALIZING ||
1803 state == SCIC_INITIALIZED) { 1850 state == SCIC_INITIALIZED) {
1804 1851
1805 if (sci_oem_parameters_validate(&ihost->oem_parameters)) 1852 if (sci_oem_parameters_validate(&ihost->oem_parameters,
1853 pci_info->orom->hdr.version))
1806 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 1854 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1807 1855
1808 return SCI_SUCCESS; 1856 return SCI_SUCCESS;
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
1857 ihost->power_control.phys_waiting--; 1905 ihost->power_control.phys_waiting--;
1858 ihost->power_control.phys_granted_power++; 1906 ihost->power_control.phys_granted_power++;
1859 sci_phy_consume_power_handler(iphy); 1907 sci_phy_consume_power_handler(iphy);
1908
1909 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
1910 u8 j;
1911
1912 for (j = 0; j < SCI_MAX_PHYS; j++) {
1913 struct isci_phy *requester = ihost->power_control.requesters[j];
1914
1915 /*
1916 * Search the power_control queue to see if there are other phys
1917 * attached to the same remote device. If found, take all of
1918 * them out of await_sas_power state.
1919 */
1920 if (requester != NULL && requester != iphy) {
1921 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1922 iphy->frame_rcvd.iaf.sas_addr,
1923 sizeof(requester->frame_rcvd.iaf.sas_addr));
1924
1925 if (other == 0) {
1926 ihost->power_control.requesters[j] = NULL;
1927 ihost->power_control.phys_waiting--;
1928 sci_phy_consume_power_handler(requester);
1929 }
1930 }
1931 }
1932 }
1860 } 1933 }
1861 1934
1862 /* 1935 /*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1891 ihost->power_control.timer_started = true; 1964 ihost->power_control.timer_started = true;
1892 1965
1893 } else { 1966 } else {
1894 /* Add the phy in the waiting list */ 1967 /*
1895 ihost->power_control.requesters[iphy->phy_index] = iphy; 1968 * There are phys, attached to the same sas address as this phy, are
1896 ihost->power_control.phys_waiting++; 1969 * already in READY state, this phy don't need wait.
1970 */
1971 u8 i;
1972 struct isci_phy *current_phy;
1973
1974 for (i = 0; i < SCI_MAX_PHYS; i++) {
1975 u8 other;
1976 current_phy = &ihost->phys[i];
1977
1978 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1979 iphy->frame_rcvd.iaf.sas_addr,
1980 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1981
1982 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1983 current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
1984 other == 0) {
1985 sci_phy_consume_power_handler(iphy);
1986 break;
1987 }
1988 }
1989
1990 if (i == SCI_MAX_PHYS) {
1991 /* Add the phy in the waiting list */
1992 ihost->power_control.requesters[iphy->phy_index] = iphy;
1993 ihost->power_control.phys_waiting++;
1994 }
1897 } 1995 }
1898} 1996}
1899 1997
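The isci hunk above lets a SAS phy skip the staggered spin-up wait when another phy attached to the same remote SAS address is already in the READY state; the decisive test is a memcmp of the attached SAS addresses across the controller's phys. A reduced sketch of that check (demo_phy is a made-up stand-in for the driver's phy state):

#include <linux/string.h>
#include <linux/types.h>

struct demo_phy {
        u8 attached_sas_addr[8];
        bool ready;
};

/* True when some other phy to the same attached address is already READY. */
static bool demo_sibling_already_powered(const struct demo_phy *phys, int nr,
                                         const struct demo_phy *iphy)
{
        int i;

        for (i = 0; i < nr; i++) {
                const struct demo_phy *p = &phys[i];

                if (p == iphy || !p->ready)
                        continue;
                if (!memcmp(p->attached_sas_addr, iphy->attached_sas_addr,
                            sizeof(p->attached_sas_addr)))
                        return true;
        }
        return false;
}
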
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1908 ihost->power_control.requesters[iphy->phy_index] = NULL; 2006 ihost->power_control.requesters[iphy->phy_index] = NULL;
1909} 2007}
1910 2008
2009static int is_long_cable(int phy, unsigned char selection_byte)
2010{
2011 return !!(selection_byte & (1 << phy));
2012}
2013
2014static int is_medium_cable(int phy, unsigned char selection_byte)
2015{
2016 return !!(selection_byte & (1 << (phy + 4)));
2017}
2018
2019static enum cable_selections decode_selection_byte(
2020 int phy,
2021 unsigned char selection_byte)
2022{
2023 return ((selection_byte & (1 << phy)) ? 1 : 0)
2024 + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
2025}
2026
2027static unsigned char *to_cable_select(struct isci_host *ihost)
2028{
2029 if (is_cable_select_overridden())
2030 return ((unsigned char *)&cable_selection_override)
2031 + ihost->id;
2032 else
2033 return &ihost->oem_parameters.controller.cable_selection_mask;
2034}
2035
2036enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
2037{
2038 return decode_selection_byte(phy, *to_cable_select(ihost));
2039}
2040
2041char *lookup_cable_names(enum cable_selections selection)
2042{
2043 static char *cable_names[] = {
2044 [short_cable] = "short",
2045 [long_cable] = "long",
2046 [medium_cable] = "medium",
2047 [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
2048 };
2049 return (selection <= undefined_cable) ? cable_names[selection]
2050 : cable_names[undefined_cable];
2051}
2052
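decode_selection_byte() above packs two per-phy OEM bits into the cable_selections enum: bit <phy> selects a long cable and bit <phy + 4> a medium one, so the two-bit values 00/01/10/11 map to short/long/medium/undefined. An equivalent standalone decoder, followed by a worked example:

enum cable_selections {
        short_cable = 0,
        long_cable = 1,
        medium_cable = 2,
        undefined_cable = 3
};

/* bit <phy> => long, bit <phy + 4> => medium; both set => undefined. */
static enum cable_selections demo_decode(int phy, unsigned char sel)
{
        return (enum cable_selections)(((sel >> phy) & 1) |
                                       (((sel >> (phy + 4)) & 1) << 1));
}

For example, a selection byte of 0x21 decodes to long_cable for phy 0, medium_cable for phy 1, and short_cable for phys 2 and 3.
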
1911#define AFE_REGISTER_WRITE_DELAY 10 2053#define AFE_REGISTER_WRITE_DELAY 10
1912 2054
1913/* Initialize the AFE for this phy index. We need to read the AFE setup from
1914 * the OEM parameters
1915 */
1916static void sci_controller_afe_initialization(struct isci_host *ihost) 2055static void sci_controller_afe_initialization(struct isci_host *ihost)
1917{ 2056{
2057 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1918 const struct sci_oem_params *oem = &ihost->oem_parameters; 2058 const struct sci_oem_params *oem = &ihost->oem_parameters;
1919 struct pci_dev *pdev = ihost->pdev; 2059 struct pci_dev *pdev = ihost->pdev;
1920 u32 afe_status; 2060 u32 afe_status;
1921 u32 phy_id; 2061 u32 phy_id;
2062 unsigned char cable_selection_mask = *to_cable_select(ihost);
1922 2063
1923 /* Clear DFX Status registers */ 2064 /* Clear DFX Status registers */
1924 writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); 2065 writel(0x0081000f, &afe->afe_dfx_master_control0);
1925 udelay(AFE_REGISTER_WRITE_DELAY); 2066 udelay(AFE_REGISTER_WRITE_DELAY);
1926 2067
1927 if (is_b0(pdev)) { 2068 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1928 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement 2069 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1929 * Timer, PM Stagger Timer */ 2070 * Timer, PM Stagger Timer
1930 writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); 2071 */
2072 writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1931 udelay(AFE_REGISTER_WRITE_DELAY); 2073 udelay(AFE_REGISTER_WRITE_DELAY);
1932 } 2074 }
1933 2075
1934 /* Configure bias currents to normal */ 2076 /* Configure bias currents to normal */
1935 if (is_a2(pdev)) 2077 if (is_a2(pdev))
1936 writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); 2078 writel(0x00005A00, &afe->afe_bias_control);
1937 else if (is_b0(pdev) || is_c0(pdev)) 2079 else if (is_b0(pdev) || is_c0(pdev))
1938 writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); 2080 writel(0x00005F00, &afe->afe_bias_control);
2081 else if (is_c1(pdev))
2082 writel(0x00005500, &afe->afe_bias_control);
1939 2083
1940 udelay(AFE_REGISTER_WRITE_DELAY); 2084 udelay(AFE_REGISTER_WRITE_DELAY);
1941 2085
1942 /* Enable PLL */ 2086 /* Enable PLL */
1943 if (is_b0(pdev) || is_c0(pdev)) 2087 if (is_a2(pdev))
1944 writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); 2088 writel(0x80040908, &afe->afe_pll_control0);
1945 else 2089 else if (is_b0(pdev) || is_c0(pdev))
1946 writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); 2090 writel(0x80040A08, &afe->afe_pll_control0);
2091 else if (is_c1(pdev)) {
2092 writel(0x80000B08, &afe->afe_pll_control0);
2093 udelay(AFE_REGISTER_WRITE_DELAY);
2094 writel(0x00000B08, &afe->afe_pll_control0);
2095 udelay(AFE_REGISTER_WRITE_DELAY);
2096 writel(0x80000B08, &afe->afe_pll_control0);
2097 }
1947 2098
1948 udelay(AFE_REGISTER_WRITE_DELAY); 2099 udelay(AFE_REGISTER_WRITE_DELAY);
1949 2100
1950 /* Wait for the PLL to lock */ 2101 /* Wait for the PLL to lock */
1951 do { 2102 do {
1952 afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); 2103 afe_status = readl(&afe->afe_common_block_status);
1953 udelay(AFE_REGISTER_WRITE_DELAY); 2104 udelay(AFE_REGISTER_WRITE_DELAY);
1954 } while ((afe_status & 0x00001000) == 0); 2105 } while ((afe_status & 0x00001000) == 0);
1955 2106
1956 if (is_a2(pdev)) { 2107 if (is_a2(pdev)) {
1957 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ 2108 /* Shorten SAS SNW lock time (RxLock timer value from 76
1958 writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); 2109 * us to 50 us)
2110 */
2111 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1959 udelay(AFE_REGISTER_WRITE_DELAY); 2112 udelay(AFE_REGISTER_WRITE_DELAY);
1960 } 2113 }
1961 2114
1962 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { 2115 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2116 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
1963 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; 2117 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2118 int cable_length_long =
2119 is_long_cable(phy_id, cable_selection_mask);
2120 int cable_length_medium =
2121 is_medium_cable(phy_id, cable_selection_mask);
1964 2122
1965 if (is_b0(pdev)) { 2123 if (is_a2(pdev)) {
1966 /* Configure transmitter SSC parameters */ 2124 /* All defaults, except the Receive Word
1967 writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 2125 * Alignament/Comma Detect Enable....(0xe800)
2126 */
2127 writel(0x00004512, &xcvr->afe_xcvr_control0);
2128 udelay(AFE_REGISTER_WRITE_DELAY);
2129
2130 writel(0x0050100F, &xcvr->afe_xcvr_control1);
2131 udelay(AFE_REGISTER_WRITE_DELAY);
2132 } else if (is_b0(pdev)) {
2133 /* Configure transmitter SSC parameters */
2134 writel(0x00030000, &xcvr->afe_tx_ssc_control);
1968 udelay(AFE_REGISTER_WRITE_DELAY); 2135 udelay(AFE_REGISTER_WRITE_DELAY);
1969 } else if (is_c0(pdev)) { 2136 } else if (is_c0(pdev)) {
1970 /* Configure transmitter SSC parameters */ 2137 /* Configure transmitter SSC parameters */
1971 writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); 2138 writel(0x00010202, &xcvr->afe_tx_ssc_control);
1972 udelay(AFE_REGISTER_WRITE_DELAY); 2139 udelay(AFE_REGISTER_WRITE_DELAY);
1973 2140
1974 /* 2141 /* All defaults, except the Receive Word
1975 * All defaults, except the Receive Word Alignament/Comma Detect 2142 * Alignament/Comma Detect Enable....(0xe800)
1976 * Enable....(0xe800) */ 2143 */
1977 writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 2144 writel(0x00014500, &xcvr->afe_xcvr_control0);
1978 udelay(AFE_REGISTER_WRITE_DELAY); 2145 udelay(AFE_REGISTER_WRITE_DELAY);
1979 } else { 2146 } else if (is_c1(pdev)) {
1980 /* 2147 /* Configure transmitter SSC parameters */
1981 * All defaults, except the Receive Word Alignament/Comma Detect 2148 writel(0x00010202, &xcvr->afe_tx_ssc_control);
1982 * Enable....(0xe800) */
1983 writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
1984 udelay(AFE_REGISTER_WRITE_DELAY); 2149 udelay(AFE_REGISTER_WRITE_DELAY);
1985 2150
1986 writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); 2151 /* All defaults, except the Receive Word
2152 * Alignament/Comma Detect Enable....(0xe800)
2153 */
2154 writel(0x0001C500, &xcvr->afe_xcvr_control0);
1987 udelay(AFE_REGISTER_WRITE_DELAY); 2155 udelay(AFE_REGISTER_WRITE_DELAY);
1988 } 2156 }
1989 2157
1990 /* 2158 /* Power up TX and RX out from power down (PWRDNTX and
1991 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2159 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
1992 * & increase TX int & ext bias 20%....(0xe85c) */ 2160 */
1993 if (is_a2(pdev)) 2161 if (is_a2(pdev))
1994 writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 2162 writel(0x000003F0, &xcvr->afe_channel_control);
1995 else if (is_b0(pdev)) { 2163 else if (is_b0(pdev)) {
1996 /* Power down TX and RX (PWRDNTX and PWRDNRX) */ 2164 writel(0x000003D7, &xcvr->afe_channel_control);
1997 writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
1998 udelay(AFE_REGISTER_WRITE_DELAY); 2165 udelay(AFE_REGISTER_WRITE_DELAY);
1999 2166
2000 /* 2167 writel(0x000003D4, &xcvr->afe_channel_control);
2001 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2168 } else if (is_c0(pdev)) {
2002 * & increase TX int & ext bias 20%....(0xe85c) */ 2169 writel(0x000001E7, &xcvr->afe_channel_control);
2003 writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2004 } else {
2005 writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2006 udelay(AFE_REGISTER_WRITE_DELAY); 2170 udelay(AFE_REGISTER_WRITE_DELAY);
2007 2171
2008 /* 2172 writel(0x000001E4, &xcvr->afe_channel_control);
2009 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) 2173 } else if (is_c1(pdev)) {
2010 * & increase TX int & ext bias 20%....(0xe85c) */ 2174 writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2011 writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); 2175 &xcvr->afe_channel_control);
2176 udelay(AFE_REGISTER_WRITE_DELAY);
2177
2178 writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2179 &xcvr->afe_channel_control);
2012 } 2180 }
2013 udelay(AFE_REGISTER_WRITE_DELAY); 2181 udelay(AFE_REGISTER_WRITE_DELAY);
2014 2182
2015 if (is_a2(pdev)) { 2183 if (is_a2(pdev)) {
2016 /* Enable TX equalization (0xe824) */ 2184 /* Enable TX equalization (0xe824) */
2017 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2185 writel(0x00040000, &xcvr->afe_tx_control);
2018 udelay(AFE_REGISTER_WRITE_DELAY); 2186 udelay(AFE_REGISTER_WRITE_DELAY);
2019 } 2187 }
2020 2188
2021 /* 2189 if (is_a2(pdev) || is_b0(pdev))
2022 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), 2190 /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
2023 * RDD=0x0(RX Detect Enabled) ....(0xe800) */ 2191 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
2024 writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); 2192 * Enabled) ....(0xe800)
2193 */
2194 writel(0x00004100, &xcvr->afe_xcvr_control0);
2195 else if (is_c0(pdev))
2196 writel(0x00014100, &xcvr->afe_xcvr_control0);
2197 else if (is_c1(pdev))
2198 writel(0x0001C100, &xcvr->afe_xcvr_control0);
2025 udelay(AFE_REGISTER_WRITE_DELAY); 2199 udelay(AFE_REGISTER_WRITE_DELAY);
2026 2200
2027 /* Leave DFE/FFE on */ 2201 /* Leave DFE/FFE on */
2028 if (is_a2(pdev)) 2202 if (is_a2(pdev))
2029 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2203 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2030 else if (is_b0(pdev)) { 2204 else if (is_b0(pdev)) {
2031 writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2205 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2032 udelay(AFE_REGISTER_WRITE_DELAY); 2206 udelay(AFE_REGISTER_WRITE_DELAY);
2033 /* Enable TX equalization (0xe824) */ 2207 /* Enable TX equalization (0xe824) */
2034 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2208 writel(0x00040000, &xcvr->afe_tx_control);
2035 } else { 2209 } else if (is_c0(pdev)) {
2036 writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); 2210 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2211 udelay(AFE_REGISTER_WRITE_DELAY);
2212
2213 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2214 udelay(AFE_REGISTER_WRITE_DELAY);
2215
2216 /* Enable TX equalization (0xe824) */
2217 writel(0x00040000, &xcvr->afe_tx_control);
2218 } else if (is_c1(pdev)) {
2219 writel(cable_length_long ? 0x01500C0C :
2220 cable_length_medium ? 0x01400C0D : 0x02400C0D,
2221 &xcvr->afe_xcvr_control1);
2222 udelay(AFE_REGISTER_WRITE_DELAY);
2223
2224 writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2037 udelay(AFE_REGISTER_WRITE_DELAY); 2225 udelay(AFE_REGISTER_WRITE_DELAY);
2038 2226
2039 writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); 2227 writel(cable_length_long ? 0x33091C1F :
2228 cable_length_medium ? 0x3315181F : 0x2B17161F,
2229 &xcvr->afe_rx_ssc_control0);
2040 udelay(AFE_REGISTER_WRITE_DELAY); 2230 udelay(AFE_REGISTER_WRITE_DELAY);
2041 2231
2042 /* Enable TX equalization (0xe824) */ 2232 /* Enable TX equalization (0xe824) */
2043 writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); 2233 writel(0x00040000, &xcvr->afe_tx_control);
2044 } 2234 }
2045 2235
2046 udelay(AFE_REGISTER_WRITE_DELAY); 2236 udelay(AFE_REGISTER_WRITE_DELAY);
2047 2237
2048 writel(oem_phy->afe_tx_amp_control0, 2238 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2049 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2050 udelay(AFE_REGISTER_WRITE_DELAY); 2239 udelay(AFE_REGISTER_WRITE_DELAY);
2051 2240
2052 writel(oem_phy->afe_tx_amp_control1, 2241 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2053 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2054 udelay(AFE_REGISTER_WRITE_DELAY); 2242 udelay(AFE_REGISTER_WRITE_DELAY);
2055 2243
2056 writel(oem_phy->afe_tx_amp_control2, 2244 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2057 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2058 udelay(AFE_REGISTER_WRITE_DELAY); 2245 udelay(AFE_REGISTER_WRITE_DELAY);
2059 2246
2060 writel(oem_phy->afe_tx_amp_control3, 2247 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2061 &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2062 udelay(AFE_REGISTER_WRITE_DELAY); 2248 udelay(AFE_REGISTER_WRITE_DELAY);
2063 } 2249 }
2064 2250
2065 /* Transfer control to the PEs */ 2251 /* Transfer control to the PEs */
2066 writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); 2252 writel(0x00010f00, &afe->afe_dfx_master_control0);
2067 udelay(AFE_REGISTER_WRITE_DELAY); 2253 udelay(AFE_REGISTER_WRITE_DELAY);
2068} 2254}
2069 2255
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 646051afd3cb..5477f0fa8233 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
435 435
436static inline bool is_c0(struct pci_dev *pdev) 436static inline bool is_c0(struct pci_dev *pdev)
437{ 437{
438 if (pdev->revision >= 5) 438 if (pdev->revision == 5)
439 return true; 439 return true;
440 return false; 440 return false;
441} 441}
442 442
443static inline bool is_c1(struct pci_dev *pdev)
444{
445 if (pdev->revision >= 6)
446 return true;
447 return false;
448}
449
450enum cable_selections {
451 short_cable = 0,
452 long_cable = 1,
453 medium_cable = 2,
454 undefined_cable = 3
455};
456
457#define CABLE_OVERRIDE_DISABLED (0x10000)
458
459static inline int is_cable_select_overridden(void)
460{
461 return cable_selection_override < CABLE_OVERRIDE_DISABLED;
462}
463
464enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
465void validate_cable_selections(struct isci_host *ihost);
466char *lookup_cable_names(enum cable_selections);
467
443/* set hw control for 'activity', even though active enclosures seem to drive 468/* set hw control for 'activity', even though active enclosures seem to drive
444 * the activity led on their own. Skip setting FSENG control on 'status' due 469 * the activity led on their own. Skip setting FSENG control on 'status' due
445 * to unexpected operation and 'error' due to not being a supported automatic 470 * to unexpected operation and 'error' due to not being a supported automatic
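
Aside on the new cable-length plumbing: decode_cable_selection() is only declared here and implemented elsewhere in host.c, so the following is a minimal sketch, not the driver's code. It assumes the cable_selection_mask layout documented in the probe_roms.h hunk further down (bits 3-0 flag a phy as "long", bits 7-4 as "medium", both clear means "short", both set is undefined) and the enum cable_selections added above:

    static enum cable_selections example_decode_cable(u8 mask, int phy)
    {
        bool phy_long = mask & (1 << phy);          /* bits 3-0: long */
        bool phy_medium = mask & (1 << (phy + 4));  /* bits 7-4: medium */

        if (phy_long && phy_medium)
            return undefined_cable;  /* setting both is undefined per the OEM docs */
        if (phy_long)
            return long_cable;
        if (phy_medium)
            return medium_cable;
        return short_cable;
    }
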
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a97edabcb85a..17c4c2c89c2e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
65#include "probe_roms.h" 65#include "probe_roms.h"
66 66
67#define MAJ 1 67#define MAJ 1
68#define MIN 0 68#define MIN 1
69#define BUILD 0 69#define BUILD 0
70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
71 __stringify(BUILD) 71 __stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
94 94
95/* linux isci specific settings */ 95/* linux isci specific settings */
96 96
97unsigned char no_outbound_task_to = 20; 97unsigned char no_outbound_task_to = 2;
98module_param(no_outbound_task_to, byte, 0); 98module_param(no_outbound_task_to, byte, 0);
99MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); 99MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
100 100
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
114module_param(stp_inactive_to, ushort, 0); 114module_param(stp_inactive_to, ushort, 0);
115MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); 115MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
116 116
117unsigned char phy_gen = 3; 117unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
118module_param(phy_gen, byte, 0); 118module_param(phy_gen, byte, 0);
119MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); 119MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
120 120
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
122module_param(max_concurr_spinup, byte, 0); 122module_param(max_concurr_spinup, byte, 0);
123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); 123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
124 124
125uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
126module_param(cable_selection_override, uint, 0);
127
128MODULE_PARM_DESC(cable_selection_override,
129 "This field indicates length of the SAS/SATA cable between "
130 "host and device. If any bits > 15 are set (default) "
131 "indicates \"use platform defaults\"");
132
125static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) 133static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
126{ 134{
127 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); 135 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
412 return NULL; 420 return NULL;
413 isci_host->shost = shost; 421 isci_host->shost = shost;
414 422
423 dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
424 "{%s, %s, %s, %s}\n",
425 (is_cable_select_overridden() ? "* " : ""), isci_host->id,
426 lookup_cable_names(decode_cable_selection(isci_host, 3)),
427 lookup_cable_names(decode_cable_selection(isci_host, 2)),
428 lookup_cable_names(decode_cable_selection(isci_host, 1)),
429 lookup_cable_names(decode_cable_selection(isci_host, 0)));
430
415 err = isci_host_init(isci_host); 431 err = isci_host_init(isci_host);
416 if (err) 432 if (err)
417 goto err_shost; 433 goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
466 orom = isci_request_oprom(pdev); 482 orom = isci_request_oprom(pdev);
467 483
468 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { 484 for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
469 if (sci_oem_parameters_validate(&orom->ctrl[i])) { 485 if (sci_oem_parameters_validate(&orom->ctrl[i],
486 orom->hdr.version)) {
470 dev_warn(&pdev->dev, 487 dev_warn(&pdev->dev,
471 "[%d]: invalid oem parameters detected, falling back to firmware\n", i); 488 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
472 devm_kfree(&pdev->dev, orom); 489 devm_kfree(&pdev->dev, orom);
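
The cable_selection_override parameter defaults to CABLE_OVERRIDE_DISABLED (0x10000), and per is_cable_select_overridden() in host.h any value with bits above 15 set means "use platform defaults". The exact plumbing is not in this hunk; a hedged sketch of how a boot-time override could take precedence over the OEM-supplied mask:

    static u8 example_effective_cable_mask(uint override, u8 oem_mask)
    {
        if (override < CABLE_OVERRIDE_DISABLED)  /* 0x10000 */
            return (u8)override;                 /* module parameter wins */
        return oem_mask;                         /* platform/OEM default */
    }
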
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 8efeb6b08321..234ab46fce33 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
480extern u16 stp_inactive_to; 480extern u16 stp_inactive_to;
481extern unsigned char phy_gen; 481extern unsigned char phy_gen;
482extern unsigned char max_concurr_spinup; 482extern unsigned char max_concurr_spinup;
483extern uint cable_selection_override;
483 484
484irqreturn_t isci_msix_isr(int vec, void *data); 485irqreturn_t isci_msix_isr(int vec, void *data);
485irqreturn_t isci_intx_isr(int vec, void *data); 486irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 35f50c2183e1..fe18acfd6eb3 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
91 91
92static enum sci_status 92static enum sci_status
93sci_phy_link_layer_initialization(struct isci_phy *iphy, 93sci_phy_link_layer_initialization(struct isci_phy *iphy,
94 struct scu_link_layer_registers __iomem *reg) 94 struct scu_link_layer_registers __iomem *llr)
95{ 95{
96 struct isci_host *ihost = iphy->owning_port->owning_controller; 96 struct isci_host *ihost = iphy->owning_port->owning_controller;
97 struct sci_phy_user_params *phy_user;
98 struct sci_phy_oem_params *phy_oem;
97 int phy_idx = iphy->phy_index; 99 int phy_idx = iphy->phy_index;
98 struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
99 struct sci_phy_oem_params *phy_oem =
100 &ihost->oem_parameters.phys[phy_idx];
101 u32 phy_configuration;
102 struct sci_phy_cap phy_cap; 100 struct sci_phy_cap phy_cap;
101 u32 phy_configuration;
103 u32 parity_check = 0; 102 u32 parity_check = 0;
104 u32 parity_count = 0; 103 u32 parity_count = 0;
105 u32 llctl, link_rate; 104 u32 llctl, link_rate;
106 u32 clksm_value = 0; 105 u32 clksm_value = 0;
107 u32 sp_timeouts = 0; 106 u32 sp_timeouts = 0;
108 107
109 iphy->link_layer_registers = reg; 108 phy_user = &ihost->user_parameters.phys[phy_idx];
109 phy_oem = &ihost->oem_parameters.phys[phy_idx];
110 iphy->link_layer_registers = llr;
110 111
111 /* Set our IDENTIFY frame data */ 112 /* Set our IDENTIFY frame data */
112 #define SCI_END_DEVICE 0x01 113 #define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
116 SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | 117 SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
117 SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | 118 SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
118 SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), 119 SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
119 &iphy->link_layer_registers->transmit_identification); 120 &llr->transmit_identification);
120 121
121 /* Write the device SAS Address */ 122 /* Write the device SAS Address */
122 writel(0xFEDCBA98, 123 writel(0xFEDCBA98, &llr->sas_device_name_high);
123 &iphy->link_layer_registers->sas_device_name_high); 124 writel(phy_idx, &llr->sas_device_name_low);
124 writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
125 125
126 /* Write the source SAS Address */ 126 /* Write the source SAS Address */
127 writel(phy_oem->sas_address.high, 127 writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
128 &iphy->link_layer_registers->source_sas_address_high); 128 writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
129 writel(phy_oem->sas_address.low,
130 &iphy->link_layer_registers->source_sas_address_low);
131 129
132 /* Clear and Set the PHY Identifier */ 130 /* Clear and Set the PHY Identifier */
133 writel(0, &iphy->link_layer_registers->identify_frame_phy_id); 131 writel(0, &llr->identify_frame_phy_id);
134 writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), 132 writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
135 &iphy->link_layer_registers->identify_frame_phy_id);
136 133
137 /* Change the initial state of the phy configuration register */ 134 /* Change the initial state of the phy configuration register */
138 phy_configuration = 135 phy_configuration = readl(&llr->phy_configuration);
139 readl(&iphy->link_layer_registers->phy_configuration);
140 136
141 /* Hold OOB state machine in reset */ 137 /* Hold OOB state machine in reset */
142 phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); 138 phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
143 writel(phy_configuration, 139 writel(phy_configuration, &llr->phy_configuration);
144 &iphy->link_layer_registers->phy_configuration);
145 140
146 /* Configure the SNW capabilities */ 141 /* Configure the SNW capabilities */
147 phy_cap.all = 0; 142 phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
149 phy_cap.gen3_no_ssc = 1; 144 phy_cap.gen3_no_ssc = 1;
150 phy_cap.gen2_no_ssc = 1; 145 phy_cap.gen2_no_ssc = 1;
151 phy_cap.gen1_no_ssc = 1; 146 phy_cap.gen1_no_ssc = 1;
152 if (ihost->oem_parameters.controller.do_enable_ssc == true) { 147 if (ihost->oem_parameters.controller.do_enable_ssc) {
148 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
149 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
150 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
151 bool en_sas = false;
152 bool en_sata = false;
153 u32 sas_type = 0;
154 u32 sata_spread = 0x2;
155 u32 sas_spread = 0x2;
156
153 phy_cap.gen3_ssc = 1; 157 phy_cap.gen3_ssc = 1;
154 phy_cap.gen2_ssc = 1; 158 phy_cap.gen2_ssc = 1;
155 phy_cap.gen1_ssc = 1; 159 phy_cap.gen1_ssc = 1;
160
161 if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
162 en_sas = en_sata = true;
163 else {
164 sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
165 sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
166
167 if (sata_spread)
168 en_sata = true;
169
170 if (sas_spread) {
171 en_sas = true;
172 sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
173 }
174
175 }
176
177 if (en_sas) {
178 u32 reg;
179
180 reg = readl(&xcvr->afe_xcvr_control0);
181 reg |= (0x00100000 | (sas_type << 19));
182 writel(reg, &xcvr->afe_xcvr_control0);
183
184 reg = readl(&xcvr->afe_tx_ssc_control);
185 reg |= sas_spread << 8;
186 writel(reg, &xcvr->afe_tx_ssc_control);
187 }
188
189 if (en_sata) {
190 u32 reg;
191
192 reg = readl(&xcvr->afe_tx_ssc_control);
193 reg |= sata_spread;
194 writel(reg, &xcvr->afe_tx_ssc_control);
195
196 reg = readl(&llr->stp_control);
197 reg |= 1 << 12;
198 writel(reg, &llr->stp_control);
199 }
156 } 200 }
157 201
158 /* 202 /* The SAS specification indicates that the phy_capabilities that
159 * The SAS specification indicates that the phy_capabilities that 203 * are transmitted shall have an even parity. Calculate the parity.
160 * are transmitted shall have an even parity. Calculate the parity. */ 204 */
161 parity_check = phy_cap.all; 205 parity_check = phy_cap.all;
162 while (parity_check != 0) { 206 while (parity_check != 0) {
163 if (parity_check & 0x1) 207 if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
165 parity_check >>= 1; 209 parity_check >>= 1;
166 } 210 }
167 211
168 /* 212 /* If parity indicates there are an odd number of bits set, then
169 * If parity indicates there are an odd number of bits set, then 213 * set the parity bit to 1 in the phy capabilities.
170 * set the parity bit to 1 in the phy capabilities. */ 214 */
171 if ((parity_count % 2) != 0) 215 if ((parity_count % 2) != 0)
172 phy_cap.parity = 1; 216 phy_cap.parity = 1;
173 217
174 writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities); 218 writel(phy_cap.all, &llr->phy_capabilities);
175 219
176 /* Set the enable spinup period but disable the ability to send 220 /* Set the enable spinup period but disable the ability to send
177 * notify enable spinup 221 * notify enable spinup
178 */ 222 */
179 writel(SCU_ENSPINUP_GEN_VAL(COUNT, 223 writel(SCU_ENSPINUP_GEN_VAL(COUNT,
180 phy_user->notify_enable_spin_up_insertion_frequency), 224 phy_user->notify_enable_spin_up_insertion_frequency),
181 &iphy->link_layer_registers->notify_enable_spinup_control); 225 &llr->notify_enable_spinup_control);
182 226
183 /* Write the ALIGN Insertion Ferequency for connected phy and 227 /* Write the ALIGN Insertion Ferequency for connected phy and
184 * inpendent of connected state 228 * inpendent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
189 clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, 233 clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
190 phy_user->align_insertion_frequency); 234 phy_user->align_insertion_frequency);
191 235
192 writel(clksm_value, &iphy->link_layer_registers->clock_skew_management); 236 writel(clksm_value, &llr->clock_skew_management);
193 237
194 /* @todo Provide a way to write this register correctly */ 238 if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
195 writel(0x02108421, 239 writel(0x04210400, &llr->afe_lookup_table_control);
196 &iphy->link_layer_registers->afe_lookup_table_control); 240 writel(0x020A7C05, &llr->sas_primitive_timeout);
241 } else
242 writel(0x02108421, &llr->afe_lookup_table_control);
197 243
198 llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, 244 llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
199 (u8)ihost->user_parameters.no_outbound_task_timeout); 245 (u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
210 break; 256 break;
211 } 257 }
212 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); 258 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
213 writel(llctl, &iphy->link_layer_registers->link_layer_control); 259 writel(llctl, &llr->link_layer_control);
214 260
215 sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); 261 sp_timeouts = readl(&llr->sas_phy_timeouts);
216 262
217 /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ 263 /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
218 sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); 264 sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
222 */ 268 */
223 sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); 269 sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
224 270
225 writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); 271 writel(sp_timeouts, &llr->sas_phy_timeouts);
226 272
227 if (is_a2(ihost->pdev)) { 273 if (is_a2(ihost->pdev)) {
228 /* Program the max ARB time for the PHY to 700us so we inter-operate with 274 /* Program the max ARB time for the PHY to 700us so we
229 * the PMC expander which shuts down PHYs if the expander PHY generates too 275 * inter-operate with the PMC expander which shuts down
230 * many breaks. This time value will guarantee that the initiator PHY will 276 * PHYs if the expander PHY generates too many breaks.
231 * generate the break. 277 * This time value will guarantee that the initiator PHY
278 * will generate the break.
232 */ 279 */
233 writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, 280 writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
234 &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout); 281 &llr->maximum_arbitration_wait_timer_timeout);
235 } 282 }
236 283
237 /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */ 284 /* Disable link layer hang detection, rely on the OS timeout for
238 writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout); 285 * I/O timeouts.
286 */
287 writel(0, &llr->link_layer_hang_detection_timeout);
239 288
240 /* We can exit the initial state to the stopped state */ 289 /* We can exit the initial state to the stopped state */
241 sci_change_state(&iphy->sm, SCI_PHY_STOPPED); 290 sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
1049 writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); 1098 writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
1050} 1099}
1051 1100
1052/** 1101static void scu_link_layer_start_oob(struct isci_phy *iphy)
1053 *
1054 *
1055 * This method will start the OOB/SN state machine for this struct isci_phy object.
1056 */
1057static void scu_link_layer_start_oob(
1058 struct isci_phy *iphy)
1059{ 1102{
1060 u32 scu_sas_pcfg_value; 1103 struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
1061 1104 u32 val;
1062 scu_sas_pcfg_value = 1105
1063 readl(&iphy->link_layer_registers->phy_configuration); 1106 /** Reset OOB sequence - start */
1064 scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); 1107 val = readl(&ll->phy_configuration);
1065 scu_sas_pcfg_value &= 1108 val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
1066 ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | 1109 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
1067 SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); 1110 writel(val, &ll->phy_configuration);
1068 writel(scu_sas_pcfg_value, 1111 readl(&ll->phy_configuration); /* flush */
1069 &iphy->link_layer_registers->phy_configuration); 1112 /** Reset OOB sequence - end */
1113
1114 /** Start OOB sequence - start */
1115 val = readl(&ll->phy_configuration);
1116 val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
1117 writel(val, &ll->phy_configuration);
1118 readl(&ll->phy_configuration); /* flush */
1119 /** Start OOB sequence - end */
1070} 1120}
1071 1121
1072/** 1122/**
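
A condensed sketch (not the driver's code) of the SSC decision the new block in sci_phy_link_layer_initialization() makes: OEM images older than ISCI_ROM_VER_1_1 get both SAS and SATA SSC enabled with the built-in 0x2 spread level, while newer images enable each side only when its per-protocol spread level is non-zero:

    static void example_ssc_policy(u8 rom_version, u32 sata_spread, u32 sas_spread,
                                   bool *en_sata, bool *en_sas)
    {
        if (rom_version < ISCI_ROM_VER_1_1) {  /* legacy ROM: enable both */
            *en_sata = *en_sas = true;
            return;
        }
        *en_sata = sata_spread != 0;
        *en_sas = sas_spread != 0;
    }
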
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index ac7f27749f97..7c6ac58a5c4c 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
114 * value is returned if the specified port is not valid. When this value is 114 * value is returned if the specified port is not valid. When this value is
115 * returned, no data is copied to the properties output parameter. 115 * returned, no data is copied to the properties output parameter.
116 */ 116 */
117static enum sci_status sci_port_get_properties(struct isci_port *iport, 117enum sci_status sci_port_get_properties(struct isci_port *iport,
118 struct sci_port_properties *prop) 118 struct sci_port_properties *prop)
119{ 119{
120 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) 120 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
647 } 647 }
648} 648}
649 649
650static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, 650static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
651 bool do_notify_user) 651{
652 sci_phy_resume(iphy);
653 iport->enabled_phy_mask |= 1 << iphy->phy_index;
654}
655
656static void sci_port_activate_phy(struct isci_port *iport,
657 struct isci_phy *iphy,
658 u8 flags)
652{ 659{
653 struct isci_host *ihost = iport->owning_controller; 660 struct isci_host *ihost = iport->owning_controller;
654 661
655 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) 662 if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
656 sci_phy_resume(iphy); 663 sci_phy_resume(iphy);
657 664
658 iport->active_phy_mask |= 1 << iphy->phy_index; 665 iport->active_phy_mask |= 1 << iphy->phy_index;
659 666
660 sci_controller_clear_invalid_phy(ihost, iphy); 667 sci_controller_clear_invalid_phy(ihost, iphy);
661 668
662 if (do_notify_user == true) 669 if (flags & PF_NOTIFY)
663 isci_port_link_up(ihost, iport, iphy); 670 isci_port_link_up(ihost, iport, iphy);
664} 671}
665 672
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
669 struct isci_host *ihost = iport->owning_controller; 676 struct isci_host *ihost = iport->owning_controller;
670 677
671 iport->active_phy_mask &= ~(1 << iphy->phy_index); 678 iport->active_phy_mask &= ~(1 << iphy->phy_index);
679 iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
672 if (!iport->active_phy_mask) 680 if (!iport->active_phy_mask)
673 iport->last_active_phy = iphy->phy_index; 681 iport->last_active_phy = iphy->phy_index;
674 682
675 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; 683 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
676 684
677 /* Re-assign the phy back to the LP as if it were a narrow port */ 685 /* Re-assign the phy back to the LP as if it were a narrow port for APC
678 writel(iphy->phy_index, 686 * mode. For MPC mode, the phy will remain in the port.
679 &iport->port_pe_configuration_register[iphy->phy_index]); 687 */
688 if (iport->owning_controller->oem_parameters.controller.mode_type ==
689 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
690 writel(iphy->phy_index,
691 &iport->port_pe_configuration_register[iphy->phy_index]);
680 692
681 if (do_notify_user == true) 693 if (do_notify_user == true)
682 isci_port_link_down(ihost, iphy, iport); 694 isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
701 * sci_port_general_link_up_handler - phy can be assigned to port? 713 * sci_port_general_link_up_handler - phy can be assigned to port?
702 * @sci_port: sci_port object for which has a phy that has gone link up. 714 * @sci_port: sci_port object for which has a phy that has gone link up.
703 * @sci_phy: This is the struct isci_phy object that has gone link up. 715 * @sci_phy: This is the struct isci_phy object that has gone link up.
704 * @do_notify_user: This parameter specifies whether to inform the user (via 716 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
705 * sci_port_link_up()) as to the fact that a new phy as become ready.
706 * 717 *
707 * Determine if this phy can be assigned to this 718 * Determine if this phy can be assigned to this port . If the phy is
708 * port . If the phy is not a valid PHY for 719 * not a valid PHY for this port then the function will notify the user.
709 * this port then the function will notify the user. A PHY can only be 720 * A PHY can only be part of a port if it's attached SAS ADDRESS is the
710 * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in 721 * same as all other PHYs in the same port.
711 * the same port. none
712 */ 722 */
713static void sci_port_general_link_up_handler(struct isci_port *iport, 723static void sci_port_general_link_up_handler(struct isci_port *iport,
714 struct isci_phy *iphy, 724 struct isci_phy *iphy,
715 bool do_notify_user) 725 u8 flags)
716{ 726{
717 struct sci_sas_address port_sas_address; 727 struct sci_sas_address port_sas_address;
718 struct sci_sas_address phy_sas_address; 728 struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
730 iport->active_phy_mask == 0) { 740 iport->active_phy_mask == 0) {
731 struct sci_base_state_machine *sm = &iport->sm; 741 struct sci_base_state_machine *sm = &iport->sm;
732 742
733 sci_port_activate_phy(iport, iphy, do_notify_user); 743 sci_port_activate_phy(iport, iphy, flags);
734 if (sm->current_state_id == SCI_PORT_RESETTING) 744 if (sm->current_state_id == SCI_PORT_RESETTING)
735 port_state_machine_change(iport, SCI_PORT_READY); 745 port_state_machine_change(iport, SCI_PORT_READY);
736 } else 746 } else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
781 struct isci_phy *iphy) 791 struct isci_phy *iphy)
782{ 792{
783 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && 793 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
784 (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && 794 (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
785 sci_port_is_wide(iport)) { 795 if (sci_port_is_wide(iport)) {
786 sci_port_invalid_link_up(iport, iphy); 796 sci_port_invalid_link_up(iport, iphy);
787 797 return false;
788 return false; 798 } else {
799 struct isci_host *ihost = iport->owning_controller;
800 struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
801 writel(iphy->phy_index,
802 &dst_port->port_pe_configuration_register[iphy->phy_index]);
803 }
789 } 804 }
790 805
791 return true; 806 return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
975 } 990 }
976} 991}
977 992
993static void scic_sds_port_ready_substate_waiting_exit(
994 struct sci_base_state_machine *sm)
995{
996 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
997 sci_port_resume_port_task_scheduler(iport);
998}
999
978static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) 1000static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
979{ 1001{
980 u32 index; 1002 u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
988 writel(iport->physical_port_index, 1010 writel(iport->physical_port_index,
989 &iport->port_pe_configuration_register[ 1011 &iport->port_pe_configuration_register[
990 iport->phy_table[index]->phy_index]); 1012 iport->phy_table[index]->phy_index]);
1013 if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
1014 sci_port_resume_phy(iport, iport->phy_table[index]);
991 } 1015 }
992 } 1016 }
993 1017
994 sci_port_update_viit_entry(iport); 1018 sci_port_update_viit_entry(iport);
995 1019
996 sci_port_resume_port_task_scheduler(iport);
997
998 /* 1020 /*
999 * Post the dummy task for the port so the hardware can schedule 1021 * Post the dummy task for the port so the hardware can schedule
1000 * io correctly 1022 * io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
1061 if (iport->active_phy_mask == 0) { 1083 if (iport->active_phy_mask == 0) {
1062 isci_port_not_ready(ihost, iport); 1084 isci_port_not_ready(ihost, iport);
1063 1085
1064 port_state_machine_change(iport, 1086 port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
1065 SCI_PORT_SUB_WAITING); 1087 } else
1066 } else if (iport->started_request_count == 0) 1088 port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
1067 port_state_machine_change(iport,
1068 SCI_PORT_SUB_OPERATIONAL);
1069}
1070
1071static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
1072{
1073 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1074
1075 sci_port_suspend_port_task_scheduler(iport);
1076 if (iport->ready_exit)
1077 sci_port_invalidate_dummy_remote_node(iport);
1078} 1089}
1079 1090
1080enum sci_status sci_port_start(struct isci_port *iport) 1091enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
1252 if (status != SCI_SUCCESS) 1263 if (status != SCI_SUCCESS)
1253 return status; 1264 return status;
1254 1265
1255 sci_port_general_link_up_handler(iport, iphy, true); 1266 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1256 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; 1267 iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
1257 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); 1268 port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
1258 1269
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
1262 1273
1263 if (status != SCI_SUCCESS) 1274 if (status != SCI_SUCCESS)
1264 return status; 1275 return status;
1265 sci_port_general_link_up_handler(iport, iphy, true); 1276 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
1266 1277
1267 /* Re-enter the configuring state since this may be the last phy in 1278 /* Re-enter the configuring state since this may be the last phy in
1268 * the port. 1279 * the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
1338 /* Since this is the first phy going link up for the port we 1349 /* Since this is the first phy going link up for the port we
1339 * can just enable it and continue 1350 * can just enable it and continue
1340 */ 1351 */
1341 sci_port_activate_phy(iport, iphy, true); 1352 sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
1342 1353
1343 port_state_machine_change(iport, 1354 port_state_machine_change(iport,
1344 SCI_PORT_SUB_OPERATIONAL); 1355 SCI_PORT_SUB_OPERATIONAL);
1345 return SCI_SUCCESS; 1356 return SCI_SUCCESS;
1346 case SCI_PORT_SUB_OPERATIONAL: 1357 case SCI_PORT_SUB_OPERATIONAL:
1347 sci_port_general_link_up_handler(iport, iphy, true); 1358 sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
1348 return SCI_SUCCESS; 1359 return SCI_SUCCESS;
1349 case SCI_PORT_RESETTING: 1360 case SCI_PORT_RESETTING:
1350 /* TODO We should make sure that the phy that has gone 1361 /* TODO We should make sure that the phy that has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
1361 /* In the resetting state we don't notify the user regarding 1372 /* In the resetting state we don't notify the user regarding
1362 * link up and link down notifications. 1373 * link up and link down notifications.
1363 */ 1374 */
1364 sci_port_general_link_up_handler(iport, iphy, false); 1375 sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
1365 return SCI_SUCCESS; 1376 return SCI_SUCCESS;
1366 default: 1377 default:
1367 dev_warn(sciport_to_dev(iport), 1378 dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
1584 }, 1595 },
1585 [SCI_PORT_SUB_WAITING] = { 1596 [SCI_PORT_SUB_WAITING] = {
1586 .enter_state = sci_port_ready_substate_waiting_enter, 1597 .enter_state = sci_port_ready_substate_waiting_enter,
1598 .exit_state = scic_sds_port_ready_substate_waiting_exit,
1587 }, 1599 },
1588 [SCI_PORT_SUB_OPERATIONAL] = { 1600 [SCI_PORT_SUB_OPERATIONAL] = {
1589 .enter_state = sci_port_ready_substate_operational_enter, 1601 .enter_state = sci_port_ready_substate_operational_enter,
1590 .exit_state = sci_port_ready_substate_operational_exit 1602 .exit_state = sci_port_ready_substate_operational_exit
1591 }, 1603 },
1592 [SCI_PORT_SUB_CONFIGURING] = { 1604 [SCI_PORT_SUB_CONFIGURING] = {
1593 .enter_state = sci_port_ready_substate_configuring_enter, 1605 .enter_state = sci_port_ready_substate_configuring_enter
1594 .exit_state = sci_port_ready_substate_configuring_exit
1595 }, 1606 },
1596 [SCI_PORT_RESETTING] = { 1607 [SCI_PORT_RESETTING] = {
1597 .exit_state = sci_port_resetting_state_exit 1608 .exit_state = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
1609 iport->logical_port_index = SCIC_SDS_DUMMY_PORT; 1620 iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
1610 iport->physical_port_index = index; 1621 iport->physical_port_index = index;
1611 iport->active_phy_mask = 0; 1622 iport->active_phy_mask = 0;
1623 iport->enabled_phy_mask = 0;
1612 iport->last_active_phy = 0; 1624 iport->last_active_phy = 0;
1613 iport->ready_exit = false; 1625 iport->ready_exit = false;
1614 1626
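
The port now keeps two masks: active_phy_mask (phys that have been activated) and the new enabled_phy_mask (phys that have actually been resumed through sci_port_resume_phy()). On entry to the operational substate the loop above resumes exactly the phys on which the two masks disagree; a minimal sketch of that selection:

    static u32 example_phys_pending_resume(u32 active_phy_mask, u32 enabled_phy_mask)
    {
        /* a set bit marks a phy that was activated (e.g. with PF_RESUME
         * clear) but not yet resumed
         */
        return active_phy_mask ^ enabled_phy_mask;
    }
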
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index cb5ffbc38603..08116090eb70 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
63 63
64#define SCIC_SDS_DUMMY_PORT 0xFF 64#define SCIC_SDS_DUMMY_PORT 0xFF
65 65
66#define PF_NOTIFY (1 << 0)
67#define PF_RESUME (1 << 1)
68
66struct isci_phy; 69struct isci_phy;
67struct isci_host; 70struct isci_host;
68 71
@@ -83,6 +86,8 @@ enum isci_status {
83 * @logical_port_index: software port index 86 * @logical_port_index: software port index
84 * @physical_port_index: hardware port index 87 * @physical_port_index: hardware port index
85 * @active_phy_mask: identifies phy members 88 * @active_phy_mask: identifies phy members
89 * @enabled_phy_mask: phy mask for the port
90 * that are already part of the port
86 * @reserved_tag: 91 * @reserved_tag:
87 * @reserved_rni: reserver for port task scheduler workaround 92 * @reserved_rni: reserver for port task scheduler workaround
88 * @started_request_count: reference count for outstanding commands 93 * @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
104 u8 logical_port_index; 109 u8 logical_port_index;
105 u8 physical_port_index; 110 u8 physical_port_index;
106 u8 active_phy_mask; 111 u8 active_phy_mask;
112 u8 enabled_phy_mask;
107 u8 last_active_phy; 113 u8 last_active_phy;
108 u16 reserved_rni; 114 u16 reserved_rni;
109 u16 reserved_tag; 115 u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
250 struct isci_port *iport, 256 struct isci_port *iport,
251 struct isci_phy *iphy); 257 struct isci_phy *iphy);
252 258
259enum sci_status sci_port_get_properties(
260 struct isci_port *iport,
261 struct sci_port_properties *prop);
262
253enum sci_status sci_port_link_up(struct isci_port *iport, 263enum sci_status sci_port_link_up(struct isci_port *iport,
254 struct isci_phy *iphy); 264 struct isci_phy *iphy);
255enum sci_status sci_port_link_down(struct isci_port *iport, 265enum sci_status sci_port_link_down(struct isci_port *iport,
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 38a99d281141..6d1e9544cbe5 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
57 57
58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) 58#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) 59#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100) 60#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
61 61
62enum SCIC_SDS_APC_ACTIVITY { 62enum SCIC_SDS_APC_ACTIVITY {
63 SCIC_SDS_APC_SKIP_PHY, 63 SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
466 return sci_port_configuration_agent_validate_ports(ihost, port_agent); 466 return sci_port_configuration_agent_validate_ports(ihost, port_agent);
467} 467}
468 468
469/*
470 * This routine will restart the automatic port configuration timeout
471 * timer for the next time period. This could be caused by either a link
472 * down event or a link up event where we can not yet tell to which a phy
473 * belongs.
474 */
475static void sci_apc_agent_start_timer(
476 struct sci_port_configuration_agent *port_agent,
477 u32 timeout)
478{
479 if (port_agent->timer_pending)
480 sci_del_timer(&port_agent->timer);
481
482 port_agent->timer_pending = true;
483 sci_mod_timer(&port_agent->timer, timeout);
484}
485
469static void sci_apc_agent_configure_ports(struct isci_host *ihost, 486static void sci_apc_agent_configure_ports(struct isci_host *ihost,
470 struct sci_port_configuration_agent *port_agent, 487 struct sci_port_configuration_agent *port_agent,
471 struct isci_phy *iphy, 488 struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
565 break; 582 break;
566 583
567 case SCIC_SDS_APC_START_TIMER: 584 case SCIC_SDS_APC_START_TIMER:
568 /* 585 sci_apc_agent_start_timer(port_agent,
569 * This can occur for either a link down event, or a link 586 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
570 * up event where we cannot yet tell the port to which a
571 * phy belongs.
572 */
573 if (port_agent->timer_pending)
574 sci_del_timer(&port_agent->timer);
575
576 port_agent->timer_pending = true;
577 sci_mod_timer(&port_agent->timer,
578 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
579 break; 587 break;
580 588
581 case SCIC_SDS_APC_SKIP_PHY: 589 case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
607 if (!iport) { 615 if (!iport) {
608 /* the phy is not the part of this port */ 616 /* the phy is not the part of this port */
609 port_agent->phy_ready_mask |= 1 << phy_index; 617 port_agent->phy_ready_mask |= 1 << phy_index;
610 sci_apc_agent_configure_ports(ihost, port_agent, iphy, true); 618 sci_apc_agent_start_timer(port_agent,
619 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
611 } else { 620 } else {
612 /* the phy is already the part of the port */ 621 /* the phy is already the part of the port */
613 u32 port_state = iport->sm.current_state_id; 622 u32 port_state = iport->sm.current_state_id;
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index b5f4341de243..9b8117b9d756 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
147 147
148 memcpy(orom, fw->data, fw->size); 148 memcpy(orom, fw->data, fw->size);
149 149
150 if (is_c0(pdev)) 150 if (is_c0(pdev) || is_c1(pdev))
151 goto out; 151 goto out;
152 152
153 /* 153 /*
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index 2c75248ca326..bb0e9d4d97c9 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@ struct sci_user_parameters {
152#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 152#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
153 153
154struct sci_oem_params; 154struct sci_oem_params;
155int sci_oem_parameters_validate(struct sci_oem_params *oem); 155int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
156 156
157struct isci_orom; 157struct isci_orom;
158struct isci_orom *isci_request_oprom(struct pci_dev *pdev); 158struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
191 0x1a, 0x04, 0xc6) 191 0x1a, 0x04, 0xc6)
192#define ISCI_EFI_VAR_NAME "RstScuO" 192#define ISCI_EFI_VAR_NAME "RstScuO"
193 193
194#define ISCI_ROM_VER_1_0 0x10
195#define ISCI_ROM_VER_1_1 0x11
196#define ISCI_ROM_VER_1_3 0x13
197#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3
198
194/* Allowed PORT configuration modes APC Automatic PORT configuration mode is 199/* Allowed PORT configuration modes APC Automatic PORT configuration mode is
195 * defined by the OEM configuration parameters providing no PHY_MASK parameters 200 * defined by the OEM configuration parameters providing no PHY_MASK parameters
196 * for any PORT. i.e. There are no phys assigned to any of the ports at start. 201 * for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
220 struct { 225 struct {
221 uint8_t mode_type; 226 uint8_t mode_type;
222 uint8_t max_concurr_spin_up; 227 uint8_t max_concurr_spin_up;
223 uint8_t do_enable_ssc; 228 /*
224 uint8_t reserved; 229 * This bitfield indicates the OEM's desired default Tx
230 * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
231 * NOTE: Default SSC Modulation Frequency is 31.5KHz.
232 */
233 union {
234 struct {
235 /*
236 * NOTE: Max spread for SATA is +0 / -5000 PPM.
237 * Down-spreading SSC (only method allowed for SATA):
238 * SATA SSC Tx Disabled = 0x0
239 * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
240 * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
241 * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
242 * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
243 */
244 uint8_t ssc_sata_tx_spread_level:4;
245 /*
246 * SAS SSC Tx Disabled = 0x0
247 *
248 * NOTE: Max spread for SAS down-spreading +0 /
249 * -2300 PPM
250 * Down-spreading SSC:
251 * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
252 * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
253 *
254 * NOTE: Max spread for SAS center-spreading +2300 /
255 * -2300 PPM
256 * Center-spreading SSC:
257 * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
258 * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
259 */
260 uint8_t ssc_sas_tx_spread_level:3;
261 /*
262 * NOTE: Refer to the SSC section of the SAS 2.x
263 * Specification for proper setting of this field.
264 * For standard SAS Initiator SAS PHY operation it
265 * should be 0 for Down-spreading.
266 * SAS SSC Tx spread type:
267 * Down-spreading SSC = 0
268 * Center-spreading SSC = 1
269 */
270 uint8_t ssc_sas_tx_type:1;
271 };
272 uint8_t do_enable_ssc;
273 };
274 /*
275 * This field indicates length of the SAS/SATA cable between
276 * host and device.
277 * This field is used make relationship between analog
278 * parameters of the phy in the silicon and length of the cable.
279 * Supported cable attenuation levels:
280 * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than
281 * 6m.
282 *
283 * This is bit mask field:
284 *
285 * BIT: (MSB) 7 6 5 4
286 * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable
287 * length assignment
288 * BIT: 3 2 1 0 (LSB)
289 * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length
290 * assignment
291 *
292 * BITS 7-4 are set when the cable length is assigned to medium
293 * BITS 3-0 are set when the cable length is assigned to long
294 *
295 * The BIT positions are clear when the cable length is
296 * assigned to short.
297 *
298 * Setting the bits for both long and medium cable length is
299 * undefined.
300 *
301 * A value of 0x84 would assign
302 * phy3 - medium
303 * phy2 - long
304 * phy1 - short
305 * phy0 - short
306 */
307 uint8_t cable_selection_mask;
225 } controller; 308 } controller;
226 309
227 struct { 310 struct {
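
The do_enable_ssc byte becomes a union with the three SSC bitfields, so a pre-1.1 OEM image that stored a plain boolean still reads back as "non-zero means on", while newer images can pack the spread levels. Assuming the usual low-bit-first bitfield layout, a byte of 0x32 would decode as ssc_sata_tx_spread_level = 0x2, ssc_sas_tx_spread_level = 0x3 and ssc_sas_tx_type = 0 (down-spreading). A standalone illustration of the aliasing:

    union example_ssc_byte {
        struct {
            u8 ssc_sata_tx_spread_level:4;  /* bits 3:0 */
            u8 ssc_sas_tx_spread_level:3;   /* bits 6:4 */
            u8 ssc_sas_tx_type:1;           /* bit 7 */
        };
        u8 do_enable_ssc;                   /* legacy view of the same byte */
    };
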
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b207cd3b15a0..dd74b6ceeb82 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55#include <scsi/sas.h> 55#include <scsi/sas.h>
56#include <linux/bitops.h>
56#include "isci.h" 57#include "isci.h"
57#include "port.h" 58#include "port.h"
58#include "remote_device.h" 59#include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1101 struct isci_remote_device *idev) 1102 struct isci_remote_device *idev)
1102{ 1103{
1103 enum sci_status status; 1104 enum sci_status status;
1105 struct sci_port_properties properties;
1104 struct domain_device *dev = idev->domain_dev; 1106 struct domain_device *dev = idev->domain_dev;
1105 1107
1106 sci_remote_device_construct(iport, idev); 1108 sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1110 * entries will be needed to store the remote node. 1112 * entries will be needed to store the remote node.
1111 */ 1113 */
1112 idev->is_direct_attached = true; 1114 idev->is_direct_attached = true;
1115
1116 sci_port_get_properties(iport, &properties);
1117 /* Get accurate port width from port's phy mask for a DA device. */
1118 idev->device_port_width = hweight32(properties.phy_mask);
1119
1113 status = sci_controller_allocate_remote_node_context(iport->owning_controller, 1120 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1114 idev, 1121 idev,
1115 &idev->rnc.remote_node_index); 1122 &idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1125 1132
1126 idev->connection_rate = sci_port_get_max_allowed_speed(iport); 1133 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1127 1134
1128 /* / @todo Should I assign the port width by reading all of the phys on the port? */
1129 idev->device_port_width = 1;
1130
1131 return SCI_SUCCESS; 1135 return SCI_SUCCESS;
1132} 1136}
1133 1137
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 66ad3dc89498..f5a3f7d2bdab 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
496 } 496 }
497 } 497 }
498 498
499 isci_print_tmf(tmf); 499 isci_print_tmf(ihost, tmf);
500 500
501 if (tmf->status == SCI_SUCCESS) 501 if (tmf->status == SCI_SUCCESS)
502 ret = TMF_RESP_FUNC_COMPLETE; 502 ret = TMF_RESP_FUNC_COMPLETE;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index bc78c0a41d5c..1b27b3797c6c 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@ struct isci_tmf {
106 } resp; 106 } resp;
107 unsigned char lun[8]; 107 unsigned char lun[8];
108 u16 io_tag; 108 u16 io_tag;
109 struct isci_remote_device *device;
110 enum isci_tmf_function_codes tmf_code; 109 enum isci_tmf_function_codes tmf_code;
111 int status; 110 int status;
112 111
@@ -120,10 +119,10 @@ struct isci_tmf {
120 119
121}; 120};
122 121
123static inline void isci_print_tmf(struct isci_tmf *tmf) 122static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
124{ 123{
125 if (SAS_PROTOCOL_SATA == tmf->proto) 124 if (SAS_PROTOCOL_SATA == tmf->proto)
126 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, 125 dev_dbg(&ihost->pdev->dev,
127 "%s: status = %x\n" 126 "%s: status = %x\n"
128 "tmf->resp.d2h_fis.status = %x\n" 127 "tmf->resp.d2h_fis.status = %x\n"
129 "tmf->resp.d2h_fis.error = %x\n", 128 "tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
132 tmf->resp.d2h_fis.status, 131 tmf->resp.d2h_fis.status,
133 tmf->resp.d2h_fis.error); 132 tmf->resp.d2h_fis.error);
134 else 133 else
135 dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, 134 dev_dbg(&ihost->pdev->dev,
136 "%s: status = %x\n" 135 "%s: status = %x\n"
137 "tmf->resp.resp_iu.data_present = %x\n" 136 "tmf->resp.resp_iu.data_present = %x\n"
138 "tmf->resp.resp_iu.status = %x\n" 137 "tmf->resp.resp_iu.status = %x\n"
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 7269e928824a..1d1b0c9da29b 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
61 * Locking Note: This function expects that the lport mutex is locked before 61 * Locking Note: This function expects that the lport mutex is locked before
62 * calling it. 62 * calling it.
63 */ 63 */
64void fc_disc_stop_rports(struct fc_disc *disc) 64static void fc_disc_stop_rports(struct fc_disc *disc)
65{ 65{
66 struct fc_lport *lport; 66 struct fc_lport *lport;
67 struct fc_rport_priv *rdata; 67 struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
682 * fc_disc_stop() - Stop discovery for a given lport 682 * fc_disc_stop() - Stop discovery for a given lport
683 * @lport: The local port that discovery should stop on 683 * @lport: The local port that discovery should stop on
684 */ 684 */
685void fc_disc_stop(struct fc_lport *lport) 685static void fc_disc_stop(struct fc_lport *lport)
686{ 686{
687 struct fc_disc *disc = &lport->disc; 687 struct fc_disc *disc = &lport->disc;
688 688
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
698 * This function will block until discovery has been 698 * This function will block until discovery has been
699 * completely stopped and all rports have been deleted. 699 * completely stopped and all rports have been deleted.
700 */ 700 */
701void fc_disc_stop_final(struct fc_lport *lport) 701static void fc_disc_stop_final(struct fc_lport *lport)
702{ 702{
703 fc_disc_stop(lport); 703 fc_disc_stop(lport);
704 lport->tt.rport_flush_queue(); 704 lport->tt.rport_flush_queue();
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index fb9161dc4ca6..e17a28d324d0 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
28#include <scsi/fc/fc_els.h> 28#include <scsi/fc/fc_els.h>
29#include <scsi/libfc.h> 29#include <scsi/libfc.h>
30#include <scsi/fc_encode.h> 30#include <scsi/fc_encode.h>
31#include "fc_libfc.h"
31 32
32/** 33/**
33 * fc_elsct_send() - Send an ELS or CT frame 34 * fc_elsct_send() - Send an ELS or CT frame
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9de9db27e874..4d70d96fa5dc 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@ struct fc_exch_pool {
91 * It manages the allocation of exchange IDs. 91 * It manages the allocation of exchange IDs.
92 */ 92 */
93struct fc_exch_mgr { 93struct fc_exch_mgr {
94 struct fc_exch_pool *pool; 94 struct fc_exch_pool __percpu *pool;
95 mempool_t *ep_pool; 95 mempool_t *ep_pool;
96 enum fc_class class; 96 enum fc_class class;
97 struct kref kref; 97 struct kref kref;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875ec3d7c..f607314810ac 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
155 fsp->xfer_ddp = FC_XID_UNKNOWN; 155 fsp->xfer_ddp = FC_XID_UNKNOWN;
156 atomic_set(&fsp->ref_cnt, 1); 156 atomic_set(&fsp->ref_cnt, 1);
157 init_timer(&fsp->timer); 157 init_timer(&fsp->timer);
158 fsp->timer.data = (unsigned long)fsp;
158 INIT_LIST_HEAD(&fsp->list); 159 INIT_LIST_HEAD(&fsp->list);
159 spin_lock_init(&fsp->scsi_pkt_lock); 160 spin_lock_init(&fsp->scsi_pkt_lock);
160 } 161 }
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
1850 } 1851 }
1851 put_cpu(); 1852 put_cpu();
1852 1853
1853 init_timer(&fsp->timer);
1854 fsp->timer.data = (unsigned long)fsp;
1855
1856 /* 1854 /*
1857 * send it to the lower layer 1855 * send it to the lower layer
1858 * if we get -1 return then put the request in the pending 1856 * if we get -1 return then put the request in the pending
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e77094a587ed..83750ebb527f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
677 * @lport: The local port receiving the event 677 * @lport: The local port receiving the event
678 * @event: The discovery event 678 * @event: The discovery event
679 */ 679 */
680void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) 680static void fc_lport_disc_callback(struct fc_lport *lport,
681 enum fc_disc_event event)
681{ 682{
682 switch (event) { 683 switch (event) {
683 case DISC_EV_SUCCESS: 684 case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
1568 * Locking Note: The lport lock is expected to be held before calling 1569 * Locking Note: The lport lock is expected to be held before calling
1569 * this routine. 1570 * this routine.
1570 */ 1571 */
1571void fc_lport_enter_flogi(struct fc_lport *lport) 1572static void fc_lport_enter_flogi(struct fc_lport *lport)
1572{ 1573{
1573 struct fc_frame *fp; 1574 struct fc_frame *fp;
1574 1575
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9e434844a69..83aa1efec875 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
391 * If it appears we are already logged in, ADISC is used to verify 391 * If it appears we are already logged in, ADISC is used to verify
392 * the setup. 392 * the setup.
393 */ 393 */
394int fc_rport_login(struct fc_rport_priv *rdata) 394static int fc_rport_login(struct fc_rport_priv *rdata)
395{ 395{
396 mutex_lock(&rdata->rp_mutex); 396 mutex_lock(&rdata->rp_mutex);
397 397
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
451 * function will hold the rport lock, call an _enter_* 451 * function will hold the rport lock, call an _enter_*
452 * function and then unlock the rport. 452 * function and then unlock the rport.
453 */ 453 */
454int fc_rport_logoff(struct fc_rport_priv *rdata) 454static int fc_rport_logoff(struct fc_rport_priv *rdata)
455{ 455{
456 mutex_lock(&rdata->rp_mutex); 456 mutex_lock(&rdata->rp_mutex);
457 457
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
653 * @fp: The FLOGI response frame 653 * @fp: The FLOGI response frame
654 * @rp_arg: The remote port that received the FLOGI response 654 * @rp_arg: The remote port that received the FLOGI response
655 */ 655 */
656void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, 656static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
657 void *rp_arg) 657 void *rp_arg)
658{ 658{
659 struct fc_rport_priv *rdata = rp_arg; 659 struct fc_rport_priv *rdata = rp_arg;
660 struct fc_lport *lport = rdata->local_port; 660 struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ reject:
1520 * 1520 *
1521 * Locking Note: Called with the lport lock held. 1521 * Locking Note: Called with the lport lock held.
1522 */ 1522 */
1523void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) 1523static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
1524{ 1524{
1525 struct fc_seq_els_data els_data; 1525 struct fc_seq_els_data els_data;
1526 1526
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5c1776406c96..15eefa1d61fd 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
306 adapter->host->sg_tablesize = adapter->sglen; 306 adapter->host->sg_tablesize = adapter->sglen;
307 307
308 308
309 /* use HP firmware and bios version encoding */ 309 /* use HP firmware and bios version encoding
310 Note: fw_version[0|1] and bios_version[0|1] were originally shifted
311 right 8 bits making them zero. This 0 value was hardcoded to fix
312 sparse warnings. */
310 if (adapter->product_info.subsysvid == HP_SUBSYS_VID) { 313 if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
311 sprintf (adapter->fw_version, "%c%d%d.%d%d", 314 sprintf (adapter->fw_version, "%c%d%d.%d%d",
312 adapter->product_info.fw_version[2], 315 adapter->product_info.fw_version[2],
313 adapter->product_info.fw_version[1] >> 8, 316 0,
314 adapter->product_info.fw_version[1] & 0x0f, 317 adapter->product_info.fw_version[1] & 0x0f,
315 adapter->product_info.fw_version[0] >> 8, 318 0,
316 adapter->product_info.fw_version[0] & 0x0f); 319 adapter->product_info.fw_version[0] & 0x0f);
317 sprintf (adapter->bios_version, "%c%d%d.%d%d", 320 sprintf (adapter->bios_version, "%c%d%d.%d%d",
318 adapter->product_info.bios_version[2], 321 adapter->product_info.bios_version[2],
319 adapter->product_info.bios_version[1] >> 8, 322 0,
320 adapter->product_info.bios_version[1] & 0x0f, 323 adapter->product_info.bios_version[1] & 0x0f,
321 adapter->product_info.bios_version[0] >> 8, 324 0,
322 adapter->product_info.bios_version[0] & 0x0f); 325 adapter->product_info.bios_version[0] & 0x0f);
323 } else { 326 } else {
324 memcpy(adapter->fw_version, 327 memcpy(adapter->fw_version,
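
The replacement comment notes that the right-shifts by 8 always evaluated to zero, which is why the hunk hardcodes 0: the version fields are single bytes, so after integer promotion the shift discards every set bit. A stand-alone demo (not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t fw_byte = 0xAB;

	/* The byte is promoted to int, then shifted: 0xAB >> 8 == 0. */
	printf("0x%02X >> 8    = %d\n", fw_byte, fw_byte >> 8);
	printf("0x%02X & 0x0f  = %d\n", fw_byte, fw_byte & 0x0f);
	return 0;
}
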
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d574fb..e5f416f8042d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.06.12-rc1" 36#define MEGASAS_VERSION "00.00.06.14-rc1"
37#define MEGASAS_RELDATE "Oct. 5, 2011" 37#define MEGASAS_RELDATE "Jan. 6, 2012"
38#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
773 773
774#define MFI_OB_INTR_STATUS_MASK 0x00000002 774#define MFI_OB_INTR_STATUS_MASK 0x00000002
775#define MFI_POLL_TIMEOUT_SECS 60 775#define MFI_POLL_TIMEOUT_SECS 60
776#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
777 776
778#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 777#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
779#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 778#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
1353 u32 mfiStatus; 1352 u32 mfiStatus;
1354 u32 last_seq_num; 1353 u32 last_seq_num;
1355 1354
1356 struct timer_list io_completion_timer;
1357 struct list_head internal_reset_pending_q; 1355 struct list_head internal_reset_pending_q;
1358 1356
1359 /* Ptr to hba specific information */ 1357 /* Ptr to hba specific information */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f9c4f1..8b300be44284 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.06.12-rc1 21 * Version : v00.00.06.14-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -59,14 +59,6 @@
59#include "megaraid_sas.h" 59#include "megaraid_sas.h"
60 60
61/* 61/*
62 * poll_mode_io:1- schedule complete completion from q cmd
63 */
64static unsigned int poll_mode_io;
65module_param_named(poll_mode_io, poll_mode_io, int, 0);
66MODULE_PARM_DESC(poll_mode_io,
67 "Complete cmds from IO path, (default=0)");
68
69/*
70 * Number of sectors per IO command 62 * Number of sectors per IO command
71 * Will be set in megasas_init_mfi if user does not provide 63 * Will be set in megasas_init_mfi if user does not provide
72 */ 64 */
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
1439 1431
1440 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 1432 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1441 cmd->frame_count-1, instance->reg_set); 1433 cmd->frame_count-1, instance->reg_set);
1442 /*
1443 * Check if we have pend cmds to be completed
1444 */
1445 if (poll_mode_io && atomic_read(&instance->fw_outstanding))
1446 tasklet_schedule(&instance->isr_tasklet);
1447 1434
1448 return 0; 1435 return 0;
1449out_return_cmd: 1436out_return_cmd:
@@ -3370,47 +3357,6 @@ fail_fw_init:
3370 return -EINVAL; 3357 return -EINVAL;
3371} 3358}
3372 3359
3373/**
3374 * megasas_start_timer - Initializes a timer object
3375 * @instance: Adapter soft state
3376 * @timer: timer object to be initialized
3377 * @fn: timer function
3378 * @interval: time interval between timer function call
3379 */
3380static inline void
3381megasas_start_timer(struct megasas_instance *instance,
3382 struct timer_list *timer,
3383 void *fn, unsigned long interval)
3384{
3385 init_timer(timer);
3386 timer->expires = jiffies + interval;
3387 timer->data = (unsigned long)instance;
3388 timer->function = fn;
3389 add_timer(timer);
3390}
3391
3392/**
3393 * megasas_io_completion_timer - Timer fn
3394 * @instance_addr: Address of adapter soft state
3395 *
3396 * Schedules tasklet for cmd completion
3397 * if poll_mode_io is set
3398 */
3399static void
3400megasas_io_completion_timer(unsigned long instance_addr)
3401{
3402 struct megasas_instance *instance =
3403 (struct megasas_instance *)instance_addr;
3404
3405 if (atomic_read(&instance->fw_outstanding))
3406 tasklet_schedule(&instance->isr_tasklet);
3407
3408 /* Restart timer */
3409 if (poll_mode_io)
3410 mod_timer(&instance->io_completion_timer,
3411 jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
3412}
3413
3414static u32 3360static u32
3415megasas_init_adapter_mfi(struct megasas_instance *instance) 3361megasas_init_adapter_mfi(struct megasas_instance *instance)
3416{ 3362{
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
3638 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 3584 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
3639 (unsigned long)instance); 3585 (unsigned long)instance);
3640 3586
3641 /* Initialize the cmd completion timer */
3642 if (poll_mode_io)
3643 megasas_start_timer(instance, &instance->io_completion_timer,
3644 megasas_io_completion_timer,
3645 MEGASAS_COMPLETION_TIMER_INTERVAL);
3646 return 0; 3587 return 0;
3647 3588
3648fail_init_adapter: 3589fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4369 host = instance->host; 4310 host = instance->host;
4370 instance->unload = 1; 4311 instance->unload = 1;
4371 4312
4372 if (poll_mode_io)
4373 del_timer_sync(&instance->io_completion_timer);
4374
4375 megasas_flush_cache(instance); 4313 megasas_flush_cache(instance);
4376 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 4314 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
4377 4315
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
4511 } 4449 }
4512 4450
4513 instance->instancet->enable_intr(instance->reg_set); 4451 instance->instancet->enable_intr(instance->reg_set);
4514
4515 /* Initialize the cmd completion timer */
4516 if (poll_mode_io)
4517 megasas_start_timer(instance, &instance->io_completion_timer,
4518 megasas_io_completion_timer,
4519 MEGASAS_COMPLETION_TIMER_INTERVAL);
4520 instance->unload = 0; 4452 instance->unload = 0;
4521 4453
4522 /* 4454 /*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
4570 host = instance->host; 4502 host = instance->host;
4571 fusion = instance->ctrl_context; 4503 fusion = instance->ctrl_context;
4572 4504
4573 if (poll_mode_io)
4574 del_timer_sync(&instance->io_completion_timer);
4575
4576 scsi_remove_host(instance->host); 4505 scsi_remove_host(instance->host);
4577 megasas_flush_cache(instance); 4506 megasas_flush_cache(instance);
4578 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 4507 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
4773 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 4702 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
4774 cmd->frame->hdr.context = cmd->index; 4703 cmd->frame->hdr.context = cmd->index;
4775 cmd->frame->hdr.pad_0 = 0; 4704 cmd->frame->hdr.pad_0 = 0;
4705 cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
4706 MFI_FRAME_SENSE64);
4776 4707
4777 /* 4708 /*
4778 * The management interface between applications and the fw uses 4709 * The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
5219static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, 5150static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
5220 megasas_sysfs_set_dbg_lvl); 5151 megasas_sysfs_set_dbg_lvl);
5221 5152
5222static ssize_t
5223megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
5224{
5225 return sprintf(buf, "%u\n", poll_mode_io);
5226}
5227
5228static ssize_t
5229megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
5230 const char *buf, size_t count)
5231{
5232 int retval = count;
5233 int tmp = poll_mode_io;
5234 int i;
5235 struct megasas_instance *instance;
5236
5237 if (sscanf(buf, "%u", &poll_mode_io) < 1) {
5238 printk(KERN_ERR "megasas: could not set poll_mode_io\n");
5239 retval = -EINVAL;
5240 }
5241
5242 /*
5243 * Check if poll_mode_io is already set or is same as previous value
5244 */
5245 if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
5246 goto out;
5247
5248 if (poll_mode_io) {
5249 /*
5250 * Start timers for all adapters
5251 */
5252 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
5253 instance = megasas_mgmt_info.instance[i];
5254 if (instance) {
5255 megasas_start_timer(instance,
5256 &instance->io_completion_timer,
5257 megasas_io_completion_timer,
5258 MEGASAS_COMPLETION_TIMER_INTERVAL);
5259 }
5260 }
5261 } else {
5262 /*
5263 * Delete timers for all adapters
5264 */
5265 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
5266 instance = megasas_mgmt_info.instance[i];
5267 if (instance)
5268 del_timer_sync(&instance->io_completion_timer);
5269 }
5270 }
5271
5272out:
5273 return retval;
5274}
5275
5276static void 5153static void
5277megasas_aen_polling(struct work_struct *work) 5154megasas_aen_polling(struct work_struct *work)
5278{ 5155{
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
5502 kfree(ev); 5379 kfree(ev);
5503} 5380}
5504 5381
5505
5506static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
5507 megasas_sysfs_show_poll_mode_io,
5508 megasas_sysfs_set_poll_mode_io);
5509
5510/** 5382/**
5511 * megasas_init - Driver load entry point 5383 * megasas_init - Driver load entry point
5512 */ 5384 */
@@ -5566,11 +5438,6 @@ static int __init megasas_init(void)
5566 if (rval) 5438 if (rval)
5567 goto err_dcf_dbg_lvl; 5439 goto err_dcf_dbg_lvl;
5568 rval = driver_create_file(&megasas_pci_driver.driver, 5440 rval = driver_create_file(&megasas_pci_driver.driver,
5569 &driver_attr_poll_mode_io);
5570 if (rval)
5571 goto err_dcf_poll_mode_io;
5572
5573 rval = driver_create_file(&megasas_pci_driver.driver,
5574 &driver_attr_support_device_change); 5441 &driver_attr_support_device_change);
5575 if (rval) 5442 if (rval)
5576 goto err_dcf_support_device_change; 5443 goto err_dcf_support_device_change;
@@ -5579,10 +5446,6 @@ static int __init megasas_init(void)
5579 5446
5580err_dcf_support_device_change: 5447err_dcf_support_device_change:
5581 driver_remove_file(&megasas_pci_driver.driver, 5448 driver_remove_file(&megasas_pci_driver.driver,
5582 &driver_attr_poll_mode_io);
5583
5584err_dcf_poll_mode_io:
5585 driver_remove_file(&megasas_pci_driver.driver,
5586 &driver_attr_dbg_lvl); 5449 &driver_attr_dbg_lvl);
5587err_dcf_dbg_lvl: 5450err_dcf_dbg_lvl:
5588 driver_remove_file(&megasas_pci_driver.driver, 5451 driver_remove_file(&megasas_pci_driver.driver,
@@ -5607,8 +5470,6 @@ err_pcidrv:
5607static void __exit megasas_exit(void) 5470static void __exit megasas_exit(void)
5608{ 5471{
5609 driver_remove_file(&megasas_pci_driver.driver, 5472 driver_remove_file(&megasas_pci_driver.driver,
5610 &driver_attr_poll_mode_io);
5611 driver_remove_file(&megasas_pci_driver.driver,
5612 &driver_attr_dbg_lvl); 5473 &driver_attr_dbg_lvl);
5613 driver_remove_file(&megasas_pci_driver.driver, 5474 driver_remove_file(&megasas_pci_driver.driver,
5614 &driver_attr_support_poll_for_event); 5475 &driver_attr_support_poll_for_event);
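
The megaraid_sas changes above delete the whole poll_mode_io facility: the module parameter, the sysfs driver attribute, and the io_completion_timer that kept rescheduling the ISR tasklet while commands were outstanding. A rough sketch of that (now removed) self-rearming poll pattern, with placeholder types rather than the driver's real ones:

#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

#define MY_POLL_INTERVAL	(HZ / 10)

struct my_hba {
	struct timer_list poll_timer;
	struct tasklet_struct isr_tasklet;
	atomic_t outstanding;
};

static void my_poll_fn(unsigned long data)
{
	struct my_hba *hba = (struct my_hba *)data;

	if (atomic_read(&hba->outstanding))
		tasklet_schedule(&hba->isr_tasklet);

	/* re-arm for the next poll */
	mod_timer(&hba->poll_timer, jiffies + MY_POLL_INTERVAL);
}

static void my_poll_start(struct my_hba *hba)
{
	init_timer(&hba->poll_timer);
	hba->poll_timer.function = my_poll_fn;
	hba->poll_timer.data = (unsigned long)hba;
	hba->poll_timer.expires = jiffies + MY_POLL_INTERVAL;
	add_timer(&hba->poll_timer);
}

static void my_poll_stop(struct my_hba *hba)
{
	del_timer_sync(&hba->poll_timer);
}

The suspend/resume and detach hunks above are the matching my_poll_stop()/my_poll_start() sites that become unnecessary once the feature is gone.
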
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd688aca..294abb0defa6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
282 else { 282 else {
283 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ 283 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
284 if ((raid->level >= 5) && 284 if ((raid->level >= 5) &&
285 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER)) 285 ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
286 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
287 raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
286 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 288 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
287 else if (raid->level == 1) { 289 else if (raid->level == 1) {
288 /* Get alternate Pd. */ 290 /* Get alternate Pd. */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 22a3ff02e48a..bfe68545203f 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,6 +150,8 @@
150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */ 150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
151 /* recovery timeout */ 151 /* recovery timeout */
152 152
153#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
154#define LSW(x) ((uint16_t)(x))
153#define LSDW(x) ((u32)((u64)(x))) 155#define LSDW(x) ((u32)((u64)(x)))
154#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) 156#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
155 157
@@ -671,6 +673,7 @@ struct scsi_qla_host {
671 uint16_t pri_ddb_idx; 673 uint16_t pri_ddb_idx;
672 uint16_t sec_ddb_idx; 674 uint16_t sec_ddb_idx;
673 int is_reset; 675 int is_reset;
676 uint16_t temperature;
674}; 677};
675 678
676struct ql4_task_data { 679struct ql4_task_data {
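
The new MSB()/LSW() helpers are plain bit extraction: LSW() keeps the low 16 bits and MSB() the high byte of a 16-bit value; later in ql4_os.c they are combined as LSW(MSB(halt_status)) == 0x67 to recognize a specific firmware abort code. A stand-alone demo with an invented register value:

#include <stdio.h>
#include <stdint.h>

#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
#define LSW(x) ((uint16_t)(x))

int main(void)
{
	uint32_t halt_status = 0x00006700;	/* hypothetical value */

	printf("MSB(x)      = 0x%02X\n", MSB(halt_status));	  /* 0x67 */
	printf("LSW(MSB(x)) = 0x%02X\n", LSW(MSB(halt_status))); /* 0x67 */
	return 0;
}
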
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1bdfa8120ac8..90614f38b55d 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
697 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 697 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
698 &ha->reg->ctrl_status); 698 &ha->reg->ctrl_status);
699 readl(&ha->reg->ctrl_status); 699 readl(&ha->reg->ctrl_status);
700 writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
701 &ha->reg->ctrl_status);
702 readl(&ha->reg->ctrl_status);
700 spin_unlock_irqrestore(&ha->hardware_lock, flags); 703 spin_unlock_irqrestore(&ha->hardware_lock, flags);
701 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { 704 if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
702 DEBUG2(printk("scsi%ld: %s: Get firmware " 705 DEBUG2(printk("scsi%ld: %s: Get firmware "
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c2593782fbbe..e1e66a45e4d0 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
219 ha->mailbox_timeout_count++; 219 ha->mailbox_timeout_count++;
220 mbx_sts[0] = (-1); 220 mbx_sts[0] = (-1);
221 set_bit(DPC_RESET_HA, &ha->dpc_flags); 221 set_bit(DPC_RESET_HA, &ha->dpc_flags);
222 if (is_qla8022(ha)) {
223 ql4_printk(KERN_INFO, ha,
224 "disabling pause transmit on port 0 & 1.\n");
225 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
226 CRB_NIU_XG_PAUSE_CTL_P0 |
227 CRB_NIU_XG_PAUSE_CTL_P1);
228 }
222 goto mbox_exit; 229 goto mbox_exit;
223 } 230 }
224 231
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 8d6bc1b2ff17..78f1111158d7 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1875,6 +1875,11 @@ exit:
1875int qla4_8xxx_load_risc(struct scsi_qla_host *ha) 1875int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
1876{ 1876{
1877 int retval; 1877 int retval;
1878
1879 /* clear the interrupt */
1880 writel(0, &ha->qla4_8xxx_reg->host_int);
1881 readl(&ha->qla4_8xxx_reg->host_int);
1882
1878 retval = qla4_8xxx_device_state_handler(ha); 1883 retval = qla4_8xxx_device_state_handler(ha);
1879 1884
1880 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags)) 1885 if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1c3f1b..dc45ac923691 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
19#define PHAN_PEG_RCV_INITIALIZED 0xff01 19#define PHAN_PEG_RCV_INITIALIZED 0xff01
20 20
21/*CRB_RELATED*/ 21/*CRB_RELATED*/
22#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200) 22#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X)) 23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
24
25#define CRB_CMDPEG_STATE QLA82XX_REG(0x50) 24#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 25#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 26#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
27#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
28
29#define qla82xx_get_temp_val(x) ((x) >> 16)
30#define qla82xx_get_temp_state(x) ((x) & 0xffff)
31#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
32
33/*
34 * Temperature control.
35 */
36enum {
37 QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
38 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
39 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
40};
41
42#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
43#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
28 44
29#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 45#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
30#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E 46#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
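
The temperature word added here packs the reading in the upper 16 bits and the state (normal/warn/panic) in the lower 16, which is what qla4_8xxx_check_temp() later decodes. A stand-alone demo with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define qla82xx_get_temp_val(x)		((x) >> 16)
#define qla82xx_get_temp_state(x)	((x) & 0xffff)
#define qla82xx_encode_temp(val, state)	(((val) << 16) | (state))

enum { TEMP_NORMAL = 0x1, TEMP_WARN, TEMP_PANIC };

int main(void)
{
	uint32_t reg = qla82xx_encode_temp(85, TEMP_WARN);

	printf("raw = 0x%08X\n", reg);
	printf("temperature = %u C, state = %u\n",
	       qla82xx_get_temp_val(reg), qla82xx_get_temp_state(reg));
	return 0;
}
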
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ec393a00c038..ce6d3b7f0c61 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
35int ql4xdisablesysfsboot = 1; 35int ql4xdisablesysfsboot = 1;
36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); 36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(ql4xdisablesysfsboot, 37MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 "Set to disable exporting boot targets to sysfs\n" 38 " Set to disable exporting boot targets to sysfs.\n"
39 " 0 - Export boot targets\n" 39 "\t\t 0 - Export boot targets\n"
40 " 1 - Do not export boot targets (Default)"); 40 "\t\t 1 - Do not export boot targets (Default)");
41 41
42int ql4xdontresethba = 0; 42int ql4xdontresethba = 0;
43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44MODULE_PARM_DESC(ql4xdontresethba, 44MODULE_PARM_DESC(ql4xdontresethba,
45 "Don't reset the HBA for driver recovery \n" 45 " Don't reset the HBA for driver recovery.\n"
46 " 0 - It will reset HBA (Default)\n" 46 "\t\t 0 - It will reset HBA (Default)\n"
47 " 1 - It will NOT reset HBA"); 47 "\t\t 1 - It will NOT reset HBA");
48 48
49int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */ 49int ql4xextended_error_logging;
50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); 50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
51MODULE_PARM_DESC(ql4xextended_error_logging, 51MODULE_PARM_DESC(ql4xextended_error_logging,
52 "Option to enable extended error logging, " 52 " Option to enable extended error logging.\n"
53 "Default is 0 - no logging, 1 - debug logging"); 53 "\t\t 0 - no logging (Default)\n"
54 "\t\t 2 - debug logging");
54 55
55int ql4xenablemsix = 1; 56int ql4xenablemsix = 1;
56module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); 57module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
57MODULE_PARM_DESC(ql4xenablemsix, 58MODULE_PARM_DESC(ql4xenablemsix,
58 "Set to enable MSI or MSI-X interrupt mechanism.\n" 59 " Set to enable MSI or MSI-X interrupt mechanism.\n"
59 " 0 = enable INTx interrupt mechanism.\n" 60 "\t\t 0 = enable INTx interrupt mechanism.\n"
60 " 1 = enable MSI-X interrupt mechanism (Default).\n" 61 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
61 " 2 = enable MSI interrupt mechanism."); 62 "\t\t 2 = enable MSI interrupt mechanism.");
62 63
63#define QL4_DEF_QDEPTH 32 64#define QL4_DEF_QDEPTH 32
64static int ql4xmaxqdepth = QL4_DEF_QDEPTH; 65static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
65module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); 66module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
66MODULE_PARM_DESC(ql4xmaxqdepth, 67MODULE_PARM_DESC(ql4xmaxqdepth,
67 "Maximum queue depth to report for target devices.\n" 68 " Maximum queue depth to report for target devices.\n"
68 " Default: 32."); 69 "\t\t Default: 32.");
69 70
70static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
71module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 72module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
72MODULE_PARM_DESC(ql4xsess_recovery_tmo, 73MODULE_PARM_DESC(ql4xsess_recovery_tmo,
73 "Target Session Recovery Timeout.\n" 74 "Target Session Recovery Timeout.\n"
74 " Default: 120 sec."); 75 "\t\t Default: 120 sec.");
75 76
76static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
77/* 78/*
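
The MODULE_PARM_DESC strings above are reflowed with a leading space and "\t\t" continuation lines so the multi-line help text stays aligned when modinfo prints it. A minimal sketch of the idiom with a placeholder parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_debug_level;
module_param(my_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(my_debug_level,
		 " Option to enable extended error logging.\n"
		 "\t\t 0 - no logging (Default)\n"
		 "\t\t 1 - debug logging");
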
@@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1630 1631
1631 /* Update timers after login */ 1632 /* Update timers after login */
1632 ddb_entry->default_relogin_timeout = 1633 ddb_entry->default_relogin_timeout =
1633 le16_to_cpu(fw_ddb_entry->def_timeout); 1634 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
1635 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
1636 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
1634 ddb_entry->default_time2wait = 1637 ddb_entry->default_time2wait =
1635 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 1638 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1636 1639
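
The relogin timeout is no longer taken from the firmware DDB verbatim; it is accepted only inside the (LOGIN_TOV, LOGIN_TOV * 10) window and falls back to LOGIN_TOV otherwise (the same check appears again in qla4xxx_setup_flash_ddb_entry() and for the build-ddb wait further down). A stand-alone illustration, assuming a LOGIN_TOV of 30 purely for the sake of the example:

#include <stdio.h>
#include <stdint.h>

#define LOGIN_TOV 30

static uint16_t pick_relogin_timeout(uint16_t fw_def_timeout)
{
	return (fw_def_timeout > LOGIN_TOV && fw_def_timeout < LOGIN_TOV * 10)
		? fw_def_timeout : LOGIN_TOV;
}

int main(void)
{
	printf("%d -> %d\n", 0,    pick_relogin_timeout(0));    /* too small */
	printf("%d -> %d\n", 120,  pick_relogin_timeout(120));  /* accepted  */
	printf("%d -> %d\n", 5000, pick_relogin_timeout(5000)); /* too large */
	return 0;
}
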
@@ -1970,6 +1973,42 @@ mem_alloc_error_exit:
1970} 1973}
1971 1974
1972/** 1975/**
1976 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
1977 * @ha: adapter block pointer.
1978 *
1979 * Note: The caller should not hold the idc lock.
1980 **/
1981static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
1982{
1983 uint32_t temp, temp_state, temp_val;
1984 int status = QLA_SUCCESS;
1985
1986 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
1987
1988 temp_state = qla82xx_get_temp_state(temp);
1989 temp_val = qla82xx_get_temp_val(temp);
1990
1991 if (temp_state == QLA82XX_TEMP_PANIC) {
1992 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
1993 " exceeds maximum allowed. Hardware has been shut"
1994 " down.\n", temp_val);
1995 status = QLA_ERROR;
1996 } else if (temp_state == QLA82XX_TEMP_WARN) {
1997 if (ha->temperature == QLA82XX_TEMP_NORMAL)
1998 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
1999 " degrees C exceeds operating range."
2000 " Immediate action needed.\n", temp_val);
2001 } else {
2002 if (ha->temperature == QLA82XX_TEMP_WARN)
2003 ql4_printk(KERN_INFO, ha, "Device temperature is"
2004 " now %d degrees C in normal range.\n",
2005 temp_val);
2006 }
2007 ha->temperature = temp_state;
2008 return status;
2009}
2010
2011/**
1973 * qla4_8xxx_check_fw_alive - Check firmware health 2012 * qla4_8xxx_check_fw_alive - Check firmware health
1974 * @ha: Pointer to host adapter structure. 2013 * @ha: Pointer to host adapter structure.
1975 * 2014 *
@@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2040 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 2079 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2041 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 2080 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2042 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2081 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2043 if (dev_state == QLA82XX_DEV_NEED_RESET && 2082
2083 if (qla4_8xxx_check_temp(ha)) {
2084 ql4_printk(KERN_INFO, ha, "disabling pause"
2085 " transmit on port 0 & 1.\n");
2086 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2087 CRB_NIU_XG_PAUSE_CTL_P0 |
2088 CRB_NIU_XG_PAUSE_CTL_P1);
2089 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2090 qla4xxx_wake_dpc(ha);
2091 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
2044 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 2092 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2045 if (!ql4xdontresethba) { 2093 if (!ql4xdontresethba) {
2046 ql4_printk(KERN_INFO, ha, "%s: HW State: " 2094 ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2057 } else { 2105 } else {
2058 /* Check firmware health */ 2106 /* Check firmware health */
2059 if (qla4_8xxx_check_fw_alive(ha)) { 2107 if (qla4_8xxx_check_fw_alive(ha)) {
2108 ql4_printk(KERN_INFO, ha, "disabling pause"
2109 " transmit on port 0 & 1.\n");
2110 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2111 CRB_NIU_XG_PAUSE_CTL_P0 |
2112 CRB_NIU_XG_PAUSE_CTL_P1);
2060 halt_status = qla4_8xxx_rd_32(ha, 2113 halt_status = qla4_8xxx_rd_32(ha,
2061 QLA82XX_PEG_HALT_STATUS1); 2114 QLA82XX_PEG_HALT_STATUS1);
2062 2115
2116 if (LSW(MSB(halt_status)) == 0x67)
2117 ql4_printk(KERN_ERR, ha, "%s:"
2118 " Firmware aborted with"
2119 " error code 0x00006700."
2120 " Device is being reset\n",
2121 __func__);
2122
2063 /* Since we cannot change dev_state in interrupt 2123 /* Since we cannot change dev_state in interrupt
2064 * context, set appropriate DPC flag then wakeup 2124 * context, set appropriate DPC flag then wakeup
2065 * DPC */ 2125 * DPC */
@@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2078 } 2138 }
2079} 2139}
2080 2140
2081void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) 2141static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2082{ 2142{
2083 struct iscsi_session *sess; 2143 struct iscsi_session *sess;
2084 struct ddb_entry *ddb_entry; 2144 struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@ exit_check:
3826 return ret; 3886 return ret;
3827} 3887}
3828 3888
3829static void qla4xxx_free_nt_list(struct list_head *list_nt) 3889static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
3830{ 3890{
3831 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; 3891 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
3832 3892
3833 /* Free up the normaltargets list */ 3893 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
3834 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { 3894 list_del_init(&ddb_idx->list);
3835 list_del_init(&nt_ddb_idx->list); 3895 vfree(ddb_idx);
3836 vfree(nt_ddb_idx);
3837 } 3896 }
3838
3839} 3897}
3840 3898
3841static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, 3899static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
3884static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, 3942static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
3885 struct ddb_entry *ddb_entry) 3943 struct ddb_entry *ddb_entry)
3886{ 3944{
3945 uint16_t def_timeout;
3946
3887 ddb_entry->ddb_type = FLASH_DDB; 3947 ddb_entry->ddb_type = FLASH_DDB;
3888 ddb_entry->fw_ddb_index = INVALID_ENTRY; 3948 ddb_entry->fw_ddb_index = INVALID_ENTRY;
3889 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 3949 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
3894 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); 3954 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
3895 atomic_set(&ddb_entry->relogin_timer, 0); 3955 atomic_set(&ddb_entry->relogin_timer, 0);
3896 atomic_set(&ddb_entry->relogin_retry_count, 0); 3956 atomic_set(&ddb_entry->relogin_retry_count, 0);
3897 3957 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
3898 ddb_entry->default_relogin_timeout = 3958 ddb_entry->default_relogin_timeout =
3899 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); 3959 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
3960 def_timeout : LOGIN_TOV;
3900 ddb_entry->default_time2wait = 3961 ddb_entry->default_time2wait =
3901 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); 3962 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
3902} 3963}
@@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
3934 ip_state == IP_ADDRSTATE_DEPRICATED || 3995 ip_state == IP_ADDRSTATE_DEPRICATED ||
3935 ip_state == IP_ADDRSTATE_DISABLING) 3996 ip_state == IP_ADDRSTATE_DISABLING)
3936 ip_idx[idx] = -1; 3997 ip_idx[idx] = -1;
3937
3938 } 3998 }
3939 3999
3940 /* Break if all IP states checked */ 4000 /* Break if all IP states checked */
@@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
3947 } while (time_after(wtime, jiffies)); 4007 } while (time_after(wtime, jiffies));
3948} 4008}
3949 4009
3950void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) 4010static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4011 struct list_head *list_st)
3951{ 4012{
4013 struct qla_ddb_index *st_ddb_idx;
3952 int max_ddbs; 4014 int max_ddbs;
4015 int fw_idx_size;
4016 struct dev_db_entry *fw_ddb_entry;
4017 dma_addr_t fw_ddb_dma;
3953 int ret; 4018 int ret;
3954 uint32_t idx = 0, next_idx = 0; 4019 uint32_t idx = 0, next_idx = 0;
3955 uint32_t state = 0, conn_err = 0; 4020 uint32_t state = 0, conn_err = 0;
3956 uint16_t conn_id; 4021 uint16_t conn_id = 0;
3957 struct dev_db_entry *fw_ddb_entry;
3958 struct ddb_entry *ddb_entry = NULL;
3959 dma_addr_t fw_ddb_dma;
3960 struct iscsi_cls_session *cls_sess;
3961 struct iscsi_session *sess;
3962 struct iscsi_cls_conn *cls_conn;
3963 struct iscsi_endpoint *ep;
3964 uint16_t cmds_max = 32, tmo = 0;
3965 uint32_t initial_cmdsn = 0;
3966 struct list_head list_st, list_nt; /* List of sendtargets */
3967 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
3968 int fw_idx_size;
3969 unsigned long wtime;
3970 struct qla_ddb_index *nt_ddb_idx;
3971
3972 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3973 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
3974 ha->is_reset = is_reset;
3975 return;
3976 }
3977 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3978 MAX_DEV_DB_ENTRIES;
3979 4022
3980 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, 4023 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
3981 &fw_ddb_dma); 4024 &fw_ddb_dma);
3982 if (fw_ddb_entry == NULL) { 4025 if (fw_ddb_entry == NULL) {
3983 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); 4026 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
3984 goto exit_ddb_list; 4027 goto exit_st_list;
3985 } 4028 }
3986 4029
3987 INIT_LIST_HEAD(&list_st); 4030 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3988 INIT_LIST_HEAD(&list_nt); 4031 MAX_DEV_DB_ENTRIES;
3989 fw_idx_size = sizeof(struct qla_ddb_index); 4032 fw_idx_size = sizeof(struct qla_ddb_index);
3990 4033
3991 for (idx = 0; idx < max_ddbs; idx = next_idx) { 4034 for (idx = 0; idx < max_ddbs; idx = next_idx) {
3992 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, 4035 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
3993 fw_ddb_dma, NULL, 4036 NULL, &next_idx, &state,
3994 &next_idx, &state, &conn_err, 4037 &conn_err, NULL, &conn_id);
3995 NULL, &conn_id);
3996 if (ret == QLA_ERROR) 4038 if (ret == QLA_ERROR)
3997 break; 4039 break;
3998 4040
3999 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4000 goto continue_next_st;
4001
4002 /* Check if ST, add to the list_st */ 4041 /* Check if ST, add to the list_st */
4003 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) 4042 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4004 goto continue_next_st; 4043 goto continue_next_st;
@@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4009 4048
4010 st_ddb_idx->fw_ddb_idx = idx; 4049 st_ddb_idx->fw_ddb_idx = idx;
4011 4050
4012 list_add_tail(&st_ddb_idx->list, &list_st); 4051 list_add_tail(&st_ddb_idx->list, list_st);
4013continue_next_st: 4052continue_next_st:
4014 if (next_idx == 0) 4053 if (next_idx == 0)
4015 break; 4054 break;
4016 } 4055 }
4017 4056
4018 /* Before issuing conn open mbox, ensure all IPs states are configured 4057exit_st_list:
4019 * Note, conn open fails if IPs are not configured 4058 if (fw_ddb_entry)
4059 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4060}
4061
4062/**
4063 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4064 * @ha: pointer to adapter structure
4065 * @list_ddb: List from which failed ddb to be removed
4066 *
4067 * Iterate over the list of DDBs and find and remove DDBs that are either in
4068 * no connection active state or failed state
4069 **/
4070static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4071 struct list_head *list_ddb)
4072{
4073 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4074 uint32_t next_idx = 0;
4075 uint32_t state = 0, conn_err = 0;
4076 int ret;
4077
4078 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4079 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4080 NULL, 0, NULL, &next_idx, &state,
4081 &conn_err, NULL, NULL);
4082 if (ret == QLA_ERROR)
4083 continue;
4084
4085 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4086 state == DDB_DS_SESSION_FAILED) {
4087 list_del_init(&ddb_idx->list);
4088 vfree(ddb_idx);
4089 }
4090 }
4091}
4092
4093static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4094 struct dev_db_entry *fw_ddb_entry,
4095 int is_reset)
4096{
4097 struct iscsi_cls_session *cls_sess;
4098 struct iscsi_session *sess;
4099 struct iscsi_cls_conn *cls_conn;
4100 struct iscsi_endpoint *ep;
4101 uint16_t cmds_max = 32;
4102 uint16_t conn_id = 0;
4103 uint32_t initial_cmdsn = 0;
4104 int ret = QLA_SUCCESS;
4105
4106 struct ddb_entry *ddb_entry = NULL;
4107
4108 /* Create session object, with INVALID_ENTRY,
4109 * the targer_id would get set when we issue the login
4020 */ 4110 */
4021 qla4xxx_wait_for_ip_configuration(ha); 4111 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4112 cmds_max, sizeof(struct ddb_entry),
4113 sizeof(struct ql4_task_data),
4114 initial_cmdsn, INVALID_ENTRY);
4115 if (!cls_sess) {
4116 ret = QLA_ERROR;
4117 goto exit_setup;
4118 }
4022 4119
4023 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ 4120 /*
4024 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 4121 * so calling module_put function to decrement the
4025 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); 4122 * reference count.
4123 **/
4124 module_put(qla4xxx_iscsi_transport.owner);
4125 sess = cls_sess->dd_data;
4126 ddb_entry = sess->dd_data;
4127 ddb_entry->sess = cls_sess;
4128
4129 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4130 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4131 sizeof(struct dev_db_entry));
4132
4133 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4134
4135 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4136
4137 if (!cls_conn) {
4138 ret = QLA_ERROR;
4139 goto exit_setup;
4026 } 4140 }
4027 4141
4028 /* Wait to ensure all sendtargets are done for min 12 sec wait */ 4142 ddb_entry->conn = cls_conn;
4029 tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
4030 DEBUG2(ql4_printk(KERN_INFO, ha,
4031 "Default time to wait for build ddb %d\n", tmo));
4032 4143
4033 wtime = jiffies + (HZ * tmo); 4144 /* Setup ep, for displaying attributes in sysfs */
4034 do { 4145 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4035 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, 4146 if (ep) {
4036 list) { 4147 ep->conn = cls_conn;
4037 ret = qla4xxx_get_fwddb_entry(ha, 4148 cls_conn->ep = ep;
4038 st_ddb_idx->fw_ddb_idx, 4149 } else {
4039 NULL, 0, NULL, &next_idx, 4150 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4040 &state, &conn_err, NULL, 4151 ret = QLA_ERROR;
4041 NULL); 4152 goto exit_setup;
4042 if (ret == QLA_ERROR) 4153 }
4043 continue;
4044 4154
4045 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 4155 /* Update sess/conn params */
4046 state == DDB_DS_SESSION_FAILED) { 4156 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
4047 list_del_init(&st_ddb_idx->list);
4048 vfree(st_ddb_idx);
4049 }
4050 }
4051 schedule_timeout_uninterruptible(HZ / 10);
4052 } while (time_after(wtime, jiffies));
4053 4157
4054 /* Free up the sendtargets list */ 4158 if (is_reset == RESET_ADAPTER) {
4055 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { 4159 iscsi_block_session(cls_sess);
4056 list_del_init(&st_ddb_idx->list); 4160 /* Use the relogin path to discover new devices
4057 vfree(st_ddb_idx); 4161 * by short-circuting the logic of setting
4162 * timer to relogin - instead set the flags
4163 * to initiate login right away.
4164 */
4165 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4166 set_bit(DF_RELOGIN, &ddb_entry->flags);
4058 } 4167 }
4059 4168
4169exit_setup:
4170 return ret;
4171}
4172
4173static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4174 struct list_head *list_nt, int is_reset)
4175{
4176 struct dev_db_entry *fw_ddb_entry;
4177 dma_addr_t fw_ddb_dma;
4178 int max_ddbs;
4179 int fw_idx_size;
4180 int ret;
4181 uint32_t idx = 0, next_idx = 0;
4182 uint32_t state = 0, conn_err = 0;
4183 uint16_t conn_id = 0;
4184 struct qla_ddb_index *nt_ddb_idx;
4185
4186 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4187 &fw_ddb_dma);
4188 if (fw_ddb_entry == NULL) {
4189 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4190 goto exit_nt_list;
4191 }
4192 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4193 MAX_DEV_DB_ENTRIES;
4194 fw_idx_size = sizeof(struct qla_ddb_index);
4195
4060 for (idx = 0; idx < max_ddbs; idx = next_idx) { 4196 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4061 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, 4197 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4062 fw_ddb_dma, NULL, 4198 NULL, &next_idx, &state,
4063 &next_idx, &state, &conn_err, 4199 &conn_err, NULL, &conn_id);
4064 NULL, &conn_id);
4065 if (ret == QLA_ERROR) 4200 if (ret == QLA_ERROR)
4066 break; 4201 break;
4067 4202
@@ -4072,107 +4207,113 @@ continue_next_st:
4072 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) 4207 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4073 goto continue_next_nt; 4208 goto continue_next_nt;
4074 4209
4075 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 4210 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4076 state == DDB_DS_SESSION_FAILED) { 4211 state == DDB_DS_SESSION_FAILED))
4077 DEBUG2(ql4_printk(KERN_INFO, ha, 4212 goto continue_next_nt;
4078 "Adding DDB to session = 0x%x\n",
4079 idx));
4080 if (is_reset == INIT_ADAPTER) {
4081 nt_ddb_idx = vmalloc(fw_idx_size);
4082 if (!nt_ddb_idx)
4083 break;
4084
4085 nt_ddb_idx->fw_ddb_idx = idx;
4086
4087 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4088 sizeof(struct dev_db_entry));
4089
4090 if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
4091 fw_ddb_entry) == QLA_SUCCESS) {
4092 vfree(nt_ddb_idx);
4093 goto continue_next_nt;
4094 }
4095 list_add_tail(&nt_ddb_idx->list, &list_nt);
4096 } else if (is_reset == RESET_ADAPTER) {
4097 if (qla4xxx_is_session_exists(ha,
4098 fw_ddb_entry) == QLA_SUCCESS)
4099 goto continue_next_nt;
4100 }
4101 4213
4102 /* Create session object, with INVALID_ENTRY, 4214 DEBUG2(ql4_printk(KERN_INFO, ha,
4103 * the targer_id would get set when we issue the login 4215 "Adding DDB to session = 0x%x\n", idx));
4104 */ 4216 if (is_reset == INIT_ADAPTER) {
4105 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, 4217 nt_ddb_idx = vmalloc(fw_idx_size);
4106 ha->host, cmds_max, 4218 if (!nt_ddb_idx)
4107 sizeof(struct ddb_entry), 4219 break;
4108 sizeof(struct ql4_task_data),
4109 initial_cmdsn, INVALID_ENTRY);
4110 if (!cls_sess)
4111 goto exit_ddb_list;
4112 4220
4113 /* 4221 nt_ddb_idx->fw_ddb_idx = idx;
4114 * iscsi_session_setup increments the driver reference
4115 * count which wouldn't let the driver to be unloaded.
4116 * so calling module_put function to decrement the
4117 * reference count.
4118 **/
4119 module_put(qla4xxx_iscsi_transport.owner);
4120 sess = cls_sess->dd_data;
4121 ddb_entry = sess->dd_data;
4122 ddb_entry->sess = cls_sess;
4123 4222
4124 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 4223 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4125 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4126 sizeof(struct dev_db_entry)); 4224 sizeof(struct dev_db_entry));
4127 4225
4128 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry); 4226 if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
4129 4227 fw_ddb_entry) == QLA_SUCCESS) {
4130 cls_conn = iscsi_conn_setup(cls_sess, 4228 vfree(nt_ddb_idx);
4131 sizeof(struct qla_conn), 4229 goto continue_next_nt;
4132 conn_id);
4133 if (!cls_conn)
4134 goto exit_ddb_list;
4135
4136 ddb_entry->conn = cls_conn;
4137
4138 /* Setup ep, for displaying attributes in sysfs */
4139 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4140 if (ep) {
4141 ep->conn = cls_conn;
4142 cls_conn->ep = ep;
4143 } else {
4144 DEBUG2(ql4_printk(KERN_ERR, ha,
4145 "Unable to get ep\n"));
4146 }
4147
4148 /* Update sess/conn params */
4149 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
4150 cls_conn);
4151
4152 if (is_reset == RESET_ADAPTER) {
4153 iscsi_block_session(cls_sess);
4154 /* Use the relogin path to discover new devices
4155 * by short-circuting the logic of setting
4156 * timer to relogin - instead set the flags
4157 * to initiate login right away.
4158 */
4159 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4160 set_bit(DF_RELOGIN, &ddb_entry->flags);
4161 } 4230 }
4231 list_add_tail(&nt_ddb_idx->list, list_nt);
4232 } else if (is_reset == RESET_ADAPTER) {
4233 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4234 QLA_SUCCESS)
4235 goto continue_next_nt;
4162 } 4236 }
4237
4238 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4239 if (ret == QLA_ERROR)
4240 goto exit_nt_list;
4241
4163continue_next_nt: 4242continue_next_nt:
4164 if (next_idx == 0) 4243 if (next_idx == 0)
4165 break; 4244 break;
4166 } 4245 }
4167exit_ddb_list: 4246
4168 qla4xxx_free_nt_list(&list_nt); 4247exit_nt_list:
4169 if (fw_ddb_entry) 4248 if (fw_ddb_entry)
4170 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); 4249 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4250}
4251
4252/**
4253 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4254 * @ha: pointer to adapter structure
4255 * @is_reset: Is this init path or reset path
4256 *
4257 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
4258 * using connection open, then create the list of normal targets (nt)
4259 * from firmware DDBs. Based on the list of nt setup session and connection
4260 * objects.
4261 **/
4262void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4263{
4264 uint16_t tmo = 0;
4265 struct list_head list_st, list_nt;
4266 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
4267 unsigned long wtime;
4268
4269 if (!test_bit(AF_LINK_UP, &ha->flags)) {
4270 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4271 ha->is_reset = is_reset;
4272 return;
4273 }
4274
4275 INIT_LIST_HEAD(&list_st);
4276 INIT_LIST_HEAD(&list_nt);
4277
4278 qla4xxx_build_st_list(ha, &list_st);
4279
4280 /* Before issuing conn open mbox, ensure all IPs states are configured
4281 * Note, conn open fails if IPs are not configured
4282 */
4283 qla4xxx_wait_for_ip_configuration(ha);
4284
4285 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
4286 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
4287 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
4288 }
4289
4290 /* Wait to ensure all sendtargets are done for min 12 sec wait */
4291 tmo = ((ha->def_timeout > LOGIN_TOV) &&
4292 (ha->def_timeout < LOGIN_TOV * 10) ?
4293 ha->def_timeout : LOGIN_TOV);
4294
4295 DEBUG2(ql4_printk(KERN_INFO, ha,
4296 "Default time to wait for build ddb %d\n", tmo));
4297
4298 wtime = jiffies + (HZ * tmo);
4299 do {
4300 if (list_empty(&list_st))
4301 break;
4302
4303 qla4xxx_remove_failed_ddb(ha, &list_st);
4304 schedule_timeout_uninterruptible(HZ / 10);
4305 } while (time_after(wtime, jiffies));
4306
4307 /* Free up the sendtargets list */
4308 qla4xxx_free_ddb_list(&list_st);
4309
4310 qla4xxx_build_nt_list(ha, &list_nt, is_reset);
4311
4312 qla4xxx_free_ddb_list(&list_nt);
4171 4313
4172 qla4xxx_free_ddb_index(ha); 4314 qla4xxx_free_ddb_index(ha);
4173} 4315}
4174 4316
4175
4176/** 4317/**
4177 * qla4xxx_probe_adapter - callback function to probe HBA 4318 * qla4xxx_probe_adapter - callback function to probe HBA
4178 * @pdev: pointer to pci_dev structure 4319 * @pdev: pointer to pci_dev structure
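
The ql4_os.c refactor above splits qla4xxx_build_ddb_list() into helpers; two of them (qla4xxx_free_ddb_list() and qla4xxx_remove_failed_ddb()) are the classic unlink-and-free walk over a list using the _safe iterator, which tolerates deleting the current node. A sketch of that pattern with a stand-in node type, not the driver's qla_ddb_index:

#include <linux/list.h>
#include <linux/vmalloc.h>

struct my_idx {
	struct list_head list;
	unsigned int fw_idx;
};

static void my_free_list(struct list_head *head)
{
	struct my_idx *idx, *tmp;

	list_for_each_entry_safe(idx, tmp, head, list) {
		list_del_init(&idx->list);
		vfree(idx);
	}
}
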
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 26a3fa34a33c..133989b3a9f4 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k10" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f85cfa6c47b5..b2c95dbe9d65 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1316 } 1316 }
1317 1317
1318 if (scsi_target_is_busy(starget)) { 1318 if (scsi_target_is_busy(starget)) {
1319 if (list_empty(&sdev->starved_entry)) 1319 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1320 list_add_tail(&sdev->starved_entry,
1321 &shost->starved_list);
1322 return 0; 1320 return 0;
1323 } 1321 }
1324 1322
1325 /* We're OK to process the command, so we can't be starved */
1326 if (!list_empty(&sdev->starved_entry))
1327 list_del_init(&sdev->starved_entry);
1328 return 1; 1323 return 1;
1329} 1324}
1330 1325
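
list_move_tail() unlinks the entry from whatever list it is on (or from itself, if it is currently unlinked) and appends it to the target list, so the list_empty() guard, the list_add_tail() and the later list_del_init() collapse into one call. A sketch, assuming the entry's list_head is always initialized with INIT_LIST_HEAD() as starved_entry is:

#include <linux/list.h>

struct my_dev {
	struct list_head starved_entry;
};

static void my_mark_starved(struct my_dev *dev, struct list_head *starved_list)
{
	/* Before: if (list_empty(&dev->starved_entry))
	 *                 list_add_tail(&dev->starved_entry, starved_list); */
	list_move_tail(&dev->starved_entry, starved_list);
}
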
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1b214910b714..f59d4a05ecd7 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
3048 3048
3049 spin_lock_irqsave(shost->host_lock, flags); 3049 spin_lock_irqsave(shost->host_lock, flags);
3050 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | 3050 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3051 FC_RPORT_DEVLOSS_PENDING); 3051 FC_RPORT_DEVLOSS_PENDING |
3052 FC_RPORT_DEVLOSS_CALLBK_DONE);
3052 spin_unlock_irqrestore(shost->host_lock, flags); 3053 spin_unlock_irqrestore(shost->host_lock, flags);
3053 3054
3054 /* ensure any stgt delete functions are done */ 3055 /* ensure any stgt delete functions are done */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 02d99982a74d..eacd46bb36b9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2368,16 +2368,15 @@ static ssize_t
2368sg_proc_write_adio(struct file *filp, const char __user *buffer, 2368sg_proc_write_adio(struct file *filp, const char __user *buffer,
2369 size_t count, loff_t *off) 2369 size_t count, loff_t *off)
2370{ 2370{
2371 int num; 2371 int err;
2372 char buff[11]; 2372 unsigned long num;
2373 2373
2374 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2374 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2375 return -EACCES; 2375 return -EACCES;
2376 num = (count < 10) ? count : 10; 2376 err = kstrtoul_from_user(buffer, count, 0, &num);
2377 if (copy_from_user(buff, buffer, num)) 2377 if (err)
2378 return -EFAULT; 2378 return err;
2379 buff[num] = '\0'; 2379 sg_allow_dio = num ? 1 : 0;
2380 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2381 return count; 2380 return count;
2382} 2381}
2383 2382
@@ -2390,17 +2389,15 @@ static ssize_t
2390sg_proc_write_dressz(struct file *filp, const char __user *buffer, 2389sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2391 size_t count, loff_t *off) 2390 size_t count, loff_t *off)
2392{ 2391{
2393 int num; 2392 int err;
2394 unsigned long k = ULONG_MAX; 2393 unsigned long k = ULONG_MAX;
2395 char buff[11];
2396 2394
2397 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2395 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2398 return -EACCES; 2396 return -EACCES;
2399 num = (count < 10) ? count : 10; 2397
2400 if (copy_from_user(buff, buffer, num)) 2398 err = kstrtoul_from_user(buffer, count, 0, &k);
2401 return -EFAULT; 2399 if (err)
2402 buff[num] = '\0'; 2400 return err;
2403 k = simple_strtoul(buff, NULL, 10);
2404 if (k <= 1048576) { /* limit "big buff" to 1 MB */ 2401 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2405 sg_big_buff = k; 2402 sg_big_buff = k;
2406 return count; 2403 return count;
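
Both sg proc write handlers switch from a bounded copy_from_user() plus simple_strtoul() to kstrtoul_from_user(), which copies, terminates and parses in one step and returns an error for malformed or out-of-range input. A minimal sketch of the idiom with placeholder names:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static int my_flag;

static ssize_t my_proc_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *off)
{
	unsigned long val;
	int err;

	err = kstrtoul_from_user(buffer, count, 0, &val);
	if (err)
		return err;	/* -EFAULT, -EINVAL or -ERANGE */

	my_flag = val ? 1 : 0;
	return count;
}
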
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f575f46..36d1ed7817eb 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
839 struct sym_lcb *lp = sym_lp(tp, sdev->lun); 839 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
840 unsigned long flags; 840 unsigned long flags;
841 841
842 /* if slave_alloc returned before allocating a sym_lcb, return */
843 if (!lp)
844 return;
845
842 spin_lock_irqsave(np->s.host->host_lock, flags); 846 spin_lock_irqsave(np->s.host->host_lock, flags);
843 847
844 if (lp->busy_itlq || lp->busy_itl) { 848 if (lp->busy_itlq || lp->busy_itl) {
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 8599545cdf9e..ac44af165b27 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,7 @@
27#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
28#include <scsi/iscsi_proto.h> 28#include <scsi/iscsi_proto.h>
29#include <target/target_core_base.h> 29#include <target/target_core_base.h>
30#include <target/target_core_tmr.h> 30#include <target/target_core_fabric.h>
31#include <target/target_core_transport.h>
32 31
33#include "iscsi_target_core.h" 32#include "iscsi_target_core.h"
34#include "iscsi_target_parameters.h" 33#include "iscsi_target_parameters.h"
@@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np(
284 sock_in6 = (struct sockaddr_in6 *)sockaddr; 283 sock_in6 = (struct sockaddr_in6 *)sockaddr;
285 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; 284 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
286 285
287 if (!memcmp((void *)&sock_in6->sin6_addr.in6_u, 286 if (!memcmp(&sock_in6->sin6_addr.in6_u,
288 (void *)&sock_in6_e->sin6_addr.in6_u, 287 &sock_in6_e->sin6_addr.in6_u,
289 sizeof(struct in6_addr))) 288 sizeof(struct in6_addr)))
290 ip_match = 1; 289 ip_match = 1;
291 290
@@ -1225,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf(
1225 1224
1226 crypto_hash_init(hash); 1225 crypto_hash_init(hash);
1227 1226
1228 sg_init_one(&sg, (u8 *)buf, payload_length); 1227 sg_init_one(&sg, buf, payload_length);
1229 crypto_hash_update(hash, &sg, payload_length); 1228 crypto_hash_update(hash, &sg, payload_length);
1230 1229
1231 if (padding) { 1230 if (padding) {
@@ -1603,7 +1602,7 @@ static int iscsit_handle_nop_out(
1603 /* 1602 /*
1604 * Attach ping data to struct iscsi_cmd->buf_ptr. 1603 * Attach ping data to struct iscsi_cmd->buf_ptr.
1605 */ 1604 */
1606 cmd->buf_ptr = (void *)ping_data; 1605 cmd->buf_ptr = ping_data;
1607 cmd->buf_ptr_size = payload_length; 1606 cmd->buf_ptr_size = payload_length;
1608 1607
1609 pr_debug("Got %u bytes of NOPOUT ping" 1608 pr_debug("Got %u bytes of NOPOUT ping"
@@ -3197,7 +3196,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3197 end_of_buf = 1; 3196 end_of_buf = 1;
3198 goto eob; 3197 goto eob;
3199 } 3198 }
3200 memcpy((void *)payload + payload_len, buf, len); 3199 memcpy(payload + payload_len, buf, len);
3201 payload_len += len; 3200 payload_len += len;
3202 3201
3203 spin_lock(&tiqn->tiqn_tpg_lock); 3202 spin_lock(&tiqn->tiqn_tpg_lock);
@@ -3229,7 +3228,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3229 end_of_buf = 1; 3228 end_of_buf = 1;
3230 goto eob; 3229 goto eob;
3231 } 3230 }
3232 memcpy((void *)payload + payload_len, buf, len); 3231 memcpy(payload + payload_len, buf, len);
3233 payload_len += len; 3232 payload_len += len;
3234 } 3233 }
3235 spin_unlock(&tpg->tpg_np_lock); 3234 spin_unlock(&tpg->tpg_np_lock);
@@ -3486,7 +3485,7 @@ int iscsi_target_tx_thread(void *arg)
3486 struct iscsi_conn *conn; 3485 struct iscsi_conn *conn;
3487 struct iscsi_queue_req *qr = NULL; 3486 struct iscsi_queue_req *qr = NULL;
3488 struct se_cmd *se_cmd; 3487 struct se_cmd *se_cmd;
3489 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3488 struct iscsi_thread_set *ts = arg;
3490 /* 3489 /*
3491 * Allow ourselves to be interrupted by SIGINT so that a 3490 * Allow ourselves to be interrupted by SIGINT so that a
3492 * connection recovery / failure event can be triggered externally. 3491 * connection recovery / failure event can be triggered externally.
@@ -3775,7 +3774,7 @@ int iscsi_target_rx_thread(void *arg)
3775 u8 buffer[ISCSI_HDR_LEN], opcode; 3774 u8 buffer[ISCSI_HDR_LEN], opcode;
3776 u32 checksum = 0, digest = 0; 3775 u32 checksum = 0, digest = 0;
3777 struct iscsi_conn *conn = NULL; 3776 struct iscsi_conn *conn = NULL;
3778 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3777 struct iscsi_thread_set *ts = arg;
3779 struct kvec iov; 3778 struct kvec iov;
3780 /* 3779 /*
3781 * Allow ourselves to be interrupted by SIGINT so that a 3780 * Allow ourselves to be interrupted by SIGINT so that a
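
Several of the iscsi_target hunks simply drop casts to or from void *, which C performs implicitly for object pointers; the generated code is unchanged. A toy stand-alone example:

#include <stdio.h>
#include <string.h>

struct ctx { int id; };

static void handler(void *arg)
{
	struct ctx *c = arg;		/* no cast needed from void * */

	printf("ctx %d\n", c->id);
}

int main(void)
{
	struct ctx c = { .id = 7 };
	char dst[8];

	handler(&c);			/* &c converts to void * implicitly */
	memcpy(dst, "ping", 5);		/* dst converts to void * implicitly */
	return 0;
}
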
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 1cd6ce373b83..db0cf7c8adde 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -82,7 +82,7 @@ static void chap_gen_challenge(
82 unsigned int *c_len) 82 unsigned int *c_len)
83{ 83{
84 unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1]; 84 unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
85 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 85 struct iscsi_chap *chap = conn->auth_protocol;
86 86
87 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); 87 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
88 88
@@ -120,7 +120,7 @@ static struct iscsi_chap *chap_server_open(
120 if (!conn->auth_protocol) 120 if (!conn->auth_protocol)
121 return NULL; 121 return NULL;
122 122
123 chap = (struct iscsi_chap *) conn->auth_protocol; 123 chap = conn->auth_protocol;
124 /* 124 /*
125 * We only support MD5 MDA presently. 125 * We only support MD5 MDA presently.
126 */ 126 */
@@ -165,14 +165,15 @@ static int chap_server_compute_md5(
165 unsigned int *nr_out_len) 165 unsigned int *nr_out_len)
166{ 166{
167 char *endptr; 167 char *endptr;
168 unsigned char id, digest[MD5_SIGNATURE_SIZE]; 168 unsigned long id;
169 unsigned char digest[MD5_SIGNATURE_SIZE];
169 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; 170 unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
170 unsigned char identifier[10], *challenge = NULL; 171 unsigned char identifier[10], *challenge = NULL;
171 unsigned char *challenge_binhex = NULL; 172 unsigned char *challenge_binhex = NULL;
172 unsigned char client_digest[MD5_SIGNATURE_SIZE]; 173 unsigned char client_digest[MD5_SIGNATURE_SIZE];
173 unsigned char server_digest[MD5_SIGNATURE_SIZE]; 174 unsigned char server_digest[MD5_SIGNATURE_SIZE];
174 unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; 175 unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
175 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 176 struct iscsi_chap *chap = conn->auth_protocol;
176 struct crypto_hash *tfm; 177 struct crypto_hash *tfm;
177 struct hash_desc desc; 178 struct hash_desc desc;
178 struct scatterlist sg; 179 struct scatterlist sg;
@@ -246,7 +247,7 @@ static int chap_server_compute_md5(
246 goto out; 247 goto out;
247 } 248 }
248 249
249 sg_init_one(&sg, (void *)&chap->id, 1); 250 sg_init_one(&sg, &chap->id, 1);
250 ret = crypto_hash_update(&desc, &sg, 1); 251 ret = crypto_hash_update(&desc, &sg, 1);
251 if (ret < 0) { 252 if (ret < 0) {
252 pr_err("crypto_hash_update() failed for id\n"); 253 pr_err("crypto_hash_update() failed for id\n");
@@ -254,7 +255,7 @@ static int chap_server_compute_md5(
254 goto out; 255 goto out;
255 } 256 }
256 257
257 sg_init_one(&sg, (void *)&auth->password, strlen(auth->password)); 258 sg_init_one(&sg, &auth->password, strlen(auth->password));
258 ret = crypto_hash_update(&desc, &sg, strlen(auth->password)); 259 ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
259 if (ret < 0) { 260 if (ret < 0) {
260 pr_err("crypto_hash_update() failed for password\n"); 261 pr_err("crypto_hash_update() failed for password\n");
@@ -262,7 +263,7 @@ static int chap_server_compute_md5(
262 goto out; 263 goto out;
263 } 264 }
264 265
265 sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH); 266 sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
266 ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH); 267 ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
267 if (ret < 0) { 268 if (ret < 0) {
268 pr_err("crypto_hash_update() failed for challenge\n"); 269 pr_err("crypto_hash_update() failed for challenge\n");
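The three crypto_hash_update() calls above feed the CHAP identifier, the shared secret, and the stored challenge into MD5 in that order, i.e. the CHAP response computation from RFC 1994: MD5(id || secret || challenge). A rough sketch of the same composition, assuming a hypothetical md5() helper and an illustrative MAX_SECRET_LEN bound:

    unsigned char concat[1 + MAX_SECRET_LEN + CHAP_CHALLENGE_LENGTH]; /* MAX_SECRET_LEN is illustrative */
    size_t off = 0;

    concat[off++] = chap->id;                                  /* one-octet identifier */
    memcpy(concat + off, auth->password, strlen(auth->password));
    off += strlen(auth->password);
    memcpy(concat + off, chap->challenge, CHAP_CHALLENGE_LENGTH);
    off += CHAP_CHALLENGE_LENGTH;
    md5(concat, off, server_digest);                           /* hypothetical helper */

The kernel code streams the same bytes through the hash descriptor instead of concatenating them, but the resulting digest is the same.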
@@ -305,14 +306,17 @@ static int chap_server_compute_md5(
305 } 306 }
306 307
307 if (type == HEX) 308 if (type == HEX)
308 id = (unsigned char)simple_strtoul((char *)&identifier[2], 309 id = simple_strtoul(&identifier[2], &endptr, 0);
309 &endptr, 0);
310 else 310 else
311 id = (unsigned char)simple_strtoul(identifier, &endptr, 0); 311 id = simple_strtoul(identifier, &endptr, 0);
312 if (id > 255) {
313 pr_err("chap identifier: %lu greater than 255\n", id);
314 goto out;
315 }
312 /* 316 /*
313 * RFC 1994 says Identifier is no more than octet (8 bits). 317 * RFC 1994 says Identifier is no more than octet (8 bits).
314 */ 318 */
315 pr_debug("[server] Got CHAP_I=%d\n", id); 319 pr_debug("[server] Got CHAP_I=%lu\n", id);
316 /* 320 /*
317 * Get CHAP_C. 321 * Get CHAP_C.
318 */ 322 */
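The hunk above widens id from unsigned char to unsigned long so that the result of simple_strtoul() can be range-checked before use. Previously, (unsigned char)simple_strtoul(...) silently truncated an out-of-range CHAP_I value to its low 8 bits; now anything above 255 is rejected, matching the RFC 1994 rule that the identifier is a single octet (and the pr_debug format is updated to %lu accordingly). A minimal sketch of the difference, using an illustrative input:

    unsigned long id = simple_strtoul("0x1ff", &endptr, 0);  /* 511, not a legal CHAP_I */

    /* old behaviour: (unsigned char)511 == 255, accepted silently */
    /* new behaviour: */
    if (id > 255) {
            pr_err("chap identifier: %lu greater than 255\n", id);
            goto out;   /* reject the exchange */
    }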
@@ -351,7 +355,7 @@ static int chap_server_compute_md5(
351 goto out; 355 goto out;
352 } 356 }
353 357
354 sg_init_one(&sg, (void *)&id, 1); 358 sg_init_one(&sg, &id, 1);
355 ret = crypto_hash_update(&desc, &sg, 1); 359 ret = crypto_hash_update(&desc, &sg, 1);
356 if (ret < 0) { 360 if (ret < 0) {
357 pr_err("crypto_hash_update() failed for id\n"); 361 pr_err("crypto_hash_update() failed for id\n");
@@ -359,7 +363,7 @@ static int chap_server_compute_md5(
359 goto out; 363 goto out;
360 } 364 }
361 365
362 sg_init_one(&sg, (void *)auth->password_mutual, 366 sg_init_one(&sg, auth->password_mutual,
363 strlen(auth->password_mutual)); 367 strlen(auth->password_mutual));
364 ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual)); 368 ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
365 if (ret < 0) { 369 if (ret < 0) {
@@ -371,7 +375,7 @@ static int chap_server_compute_md5(
371 /* 375 /*
372 * Convert received challenge to binary hex. 376 * Convert received challenge to binary hex.
373 */ 377 */
374 sg_init_one(&sg, (void *)challenge_binhex, challenge_len); 378 sg_init_one(&sg, challenge_binhex, challenge_len);
375 ret = crypto_hash_update(&desc, &sg, challenge_len); 379 ret = crypto_hash_update(&desc, &sg, challenge_len);
376 if (ret < 0) { 380 if (ret < 0) {
377 pr_err("crypto_hash_update() failed for ma challenge\n"); 381 pr_err("crypto_hash_update() failed for ma challenge\n");
@@ -414,7 +418,7 @@ static int chap_got_response(
414 char *nr_out_ptr, 418 char *nr_out_ptr,
415 unsigned int *nr_out_len) 419 unsigned int *nr_out_len)
416{ 420{
417 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 421 struct iscsi_chap *chap = conn->auth_protocol;
418 422
419 switch (chap->digest_type) { 423 switch (chap->digest_type) {
420 case CHAP_DIGEST_MD5: 424 case CHAP_DIGEST_MD5:
@@ -437,7 +441,7 @@ u32 chap_main_loop(
437 int *in_len, 441 int *in_len,
438 int *out_len) 442 int *out_len)
439{ 443{
440 struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; 444 struct iscsi_chap *chap = conn->auth_protocol;
441 445
442 if (!chap) { 446 if (!chap) {
443 chap = chap_server_open(conn, auth, in_text, out_text, out_len); 447 chap = chap_server_open(conn, auth, in_text, out_text, out_len);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index db327845e46b..3468caab47a2 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -22,12 +22,8 @@
22#include <linux/configfs.h> 22#include <linux/configfs.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <target/target_core_base.h> 24#include <target/target_core_base.h>
25#include <target/target_core_transport.h> 25#include <target/target_core_fabric.h>
26#include <target/target_core_fabric_ops.h>
27#include <target/target_core_fabric_configfs.h> 26#include <target/target_core_fabric_configfs.h>
28#include <target/target_core_fabric_lib.h>
29#include <target/target_core_device.h>
30#include <target/target_core_tpg.h>
31#include <target/target_core_configfs.h> 27#include <target/target_core_configfs.h>
32#include <target/configfs_macros.h> 28#include <target/configfs_macros.h>
33 29
@@ -56,8 +52,7 @@ struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
56{ 52{
57 struct se_portal_group *se_tpg = container_of(to_config_group(item), 53 struct se_portal_group *se_tpg = container_of(to_config_group(item),
58 struct se_portal_group, tpg_group); 54 struct se_portal_group, tpg_group);
59 struct iscsi_portal_group *tpg = 55 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
60 (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
61 int ret; 56 int ret;
62 57
63 if (!tpg) { 58 if (!tpg) {
@@ -1225,7 +1220,7 @@ struct se_portal_group *lio_target_tiqn_addtpg(
1225 1220
1226 ret = core_tpg_register( 1221 ret = core_tpg_register(
1227 &lio_target_fabric_configfs->tf_ops, 1222 &lio_target_fabric_configfs->tf_ops,
1228 wwn, &tpg->tpg_se_tpg, (void *)tpg, 1223 wwn, &tpg->tpg_se_tpg, tpg,
1229 TRANSPORT_TPG_TYPE_NORMAL); 1224 TRANSPORT_TPG_TYPE_NORMAL);
1230 if (ret < 0) 1225 if (ret < 0)
1231 return NULL; 1226 return NULL;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index a19fa5eea88e..f63ea35bc4ae 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,8 +21,7 @@
21 21
22#include <scsi/scsi_device.h> 22#include <scsi/scsi_device.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_device.h> 24#include <target/target_core_fabric.h>
25#include <target/target_core_transport.h>
26 25
27#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
28#include "iscsi_target_device.h" 27#include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b7ffc3cd40cc..478451167b62 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,7 @@
21 21
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h" 27#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 101b1beb3bca..255c0d67e898 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -21,7 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h" 27#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 0b8404c30125..1af1f21af21f 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
21 21
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_datain_values.h" 27#include "iscsi_target_datain_values.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index d734bdec24f9..373b0cc6abd8 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -23,7 +23,7 @@
23#include <linux/crypto.h> 23#include <linux/crypto.h>
24#include <scsi/iscsi_proto.h> 24#include <scsi/iscsi_proto.h>
25#include <target/target_core_base.h> 25#include <target/target_core_base.h>
26#include <target/target_core_transport.h> 26#include <target/target_core_fabric.h>
27 27
28#include "iscsi_target_core.h" 28#include "iscsi_target_core.h"
29#include "iscsi_target_tq.h" 29#include "iscsi_target_tq.h"
@@ -143,7 +143,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
143 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 143 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
144 sess_list) { 144 sess_list) {
145 145
146 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; 146 sess_p = se_sess->fabric_sess_ptr;
147 spin_lock(&sess_p->conn_lock); 147 spin_lock(&sess_p->conn_lock);
148 if (atomic_read(&sess_p->session_fall_back_to_erl0) || 148 if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
149 atomic_read(&sess_p->session_logout) || 149 atomic_read(&sess_p->session_logout) ||
@@ -151,9 +151,9 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
151 spin_unlock(&sess_p->conn_lock); 151 spin_unlock(&sess_p->conn_lock);
152 continue; 152 continue;
153 } 153 }
154 if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) && 154 if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
155 (!strcmp((void *)sess_p->sess_ops->InitiatorName, 155 (!strcmp(sess_p->sess_ops->InitiatorName,
156 (void *)initiatorname_param->value) && 156 initiatorname_param->value) &&
157 (sess_p->sess_ops->SessionType == sessiontype))) { 157 (sess_p->sess_ops->SessionType == sessiontype))) {
158 atomic_set(&sess_p->session_reinstatement, 1); 158 atomic_set(&sess_p->session_reinstatement, 1);
159 spin_unlock(&sess_p->conn_lock); 159 spin_unlock(&sess_p->conn_lock);
@@ -229,7 +229,7 @@ static int iscsi_login_zero_tsih_s1(
229 229
230 iscsi_login_set_conn_values(sess, conn, pdu->cid); 230 iscsi_login_set_conn_values(sess, conn, pdu->cid);
231 sess->init_task_tag = pdu->itt; 231 sess->init_task_tag = pdu->itt;
232 memcpy((void *)&sess->isid, (void *)pdu->isid, 6); 232 memcpy(&sess->isid, pdu->isid, 6);
233 sess->exp_cmd_sn = pdu->cmdsn; 233 sess->exp_cmd_sn = pdu->cmdsn;
234 INIT_LIST_HEAD(&sess->sess_conn_list); 234 INIT_LIST_HEAD(&sess->sess_conn_list);
235 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list); 235 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
@@ -440,8 +440,7 @@ static int iscsi_login_non_zero_tsih_s2(
440 atomic_read(&sess_p->session_logout) || 440 atomic_read(&sess_p->session_logout) ||
441 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) 441 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
442 continue; 442 continue;
443 if (!memcmp((const void *)sess_p->isid, 443 if (!memcmp(sess_p->isid, pdu->isid, 6) &&
444 (const void *)pdu->isid, 6) &&
445 (sess_p->tsih == pdu->tsih)) { 444 (sess_p->tsih == pdu->tsih)) {
446 iscsit_inc_session_usage_count(sess_p); 445 iscsit_inc_session_usage_count(sess_p);
447 iscsit_stop_time2retain_timer(sess_p); 446 iscsit_stop_time2retain_timer(sess_p);
@@ -654,7 +653,7 @@ static int iscsi_post_login_handler(
654 653
655 spin_lock_bh(&se_tpg->session_lock); 654 spin_lock_bh(&se_tpg->session_lock);
656 __transport_register_session(&sess->tpg->tpg_se_tpg, 655 __transport_register_session(&sess->tpg->tpg_se_tpg,
657 se_sess->se_node_acl, se_sess, (void *)sess); 656 se_sess->se_node_acl, se_sess, sess);
658 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n"); 657 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
659 sess->session_state = TARG_SESS_STATE_LOGGED_IN; 658 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
660 659
@@ -811,7 +810,7 @@ int iscsi_target_setup_login_socket(
811 * Setup the np->np_sockaddr from the passed sockaddr setup 810 * Setup the np->np_sockaddr from the passed sockaddr setup
812 * in iscsi_target_configfs.c code.. 811 * in iscsi_target_configfs.c code..
813 */ 812 */
814 memcpy((void *)&np->np_sockaddr, (void *)sockaddr, 813 memcpy(&np->np_sockaddr, sockaddr,
815 sizeof(struct __kernel_sockaddr_storage)); 814 sizeof(struct __kernel_sockaddr_storage));
816 815
817 if (sockaddr->ss_family == AF_INET6) 816 if (sockaddr->ss_family == AF_INET6)
@@ -821,6 +820,7 @@ int iscsi_target_setup_login_socket(
821 /* 820 /*
822 * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY. 821 * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
823 */ 822 */
823 /* FIXME: Someone please explain why this is endian-safe */
824 opt = 1; 824 opt = 1;
825 if (np->np_network_transport == ISCSI_TCP) { 825 if (np->np_network_transport == ISCSI_TCP) {
826 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, 826 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
@@ -832,6 +832,7 @@ int iscsi_target_setup_login_socket(
832 } 832 }
833 } 833 }
834 834
835 /* FIXME: Someone please explain why this is endian-safe */
835 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, 836 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
836 (char *)&opt, sizeof(opt)); 837 (char *)&opt, sizeof(opt));
837 if (ret < 0) { 838 if (ret < 0) {
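Both FIXME hunks sit in front of kernel_setsockopt() calls that pass a plain int option value through a char * argument. The value is an int in host byte order and is consumed by the same host's kernel, so no byte swapping is involved on either side; that is presumably the answer the FIXME is fishing for. A condensed sketch of the call pattern used here, error handling elided:

    int opt = 1;

    /* Disable Nagle on the login socket, then allow quick re-bind of the listener. */
    kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *)&opt, sizeof(opt));
    kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&opt, sizeof(opt));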
@@ -1206,7 +1207,7 @@ out:
1206 1207
1207int iscsi_target_login_thread(void *arg) 1208int iscsi_target_login_thread(void *arg)
1208{ 1209{
1209 struct iscsi_np *np = (struct iscsi_np *)arg; 1210 struct iscsi_np *np = arg;
1210 int ret; 1211 int ret;
1211 1212
1212 allow_signal(SIGINT); 1213 allow_signal(SIGINT);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 98936cb7c294..e89fa7457254 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -21,7 +21,7 @@
21#include <linux/ctype.h> 21#include <linux/ctype.h>
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_tpg.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_parameters.h" 27#include "iscsi_target_parameters.h"
@@ -732,7 +732,7 @@ static void iscsi_initiatorname_tolower(
732 u32 iqn_size = strlen(param_buf), i; 732 u32 iqn_size = strlen(param_buf), i;
733 733
734 for (i = 0; i < iqn_size; i++) { 734 for (i = 0; i < iqn_size; i++) {
735 c = (char *)&param_buf[i]; 735 c = &param_buf[i];
736 if (!isupper(*c)) 736 if (!isupper(*c))
737 continue; 737 continue;
738 738
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index aeafbe0cd7d1..b3c699c4fe8c 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -19,7 +19,6 @@
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21#include <target/target_core_base.h> 21#include <target/target_core_base.h>
22#include <target/target_core_transport.h>
23 22
24#include "iscsi_target_core.h" 23#include "iscsi_target_core.h"
25#include "iscsi_target_device.h" 24#include "iscsi_target_device.h"
@@ -135,7 +134,7 @@ extern int iscsit_na_nopin_timeout(
135 spin_lock_bh(&se_nacl->nacl_sess_lock); 134 spin_lock_bh(&se_nacl->nacl_sess_lock);
136 se_sess = se_nacl->nacl_sess; 135 se_sess = se_nacl->nacl_sess;
137 if (se_sess) { 136 if (se_sess) {
138 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 137 sess = se_sess->fabric_sess_ptr;
139 138
140 spin_lock(&sess->conn_lock); 139 spin_lock(&sess->conn_lock);
141 list_for_each_entry(conn, &sess->sess_conn_list, 140 list_for_each_entry(conn, &sess->sess_conn_list,
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f1db83077e0a..421d6947dc64 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,7 +23,6 @@
23#include <linux/export.h> 23#include <linux/export.h>
24#include <scsi/iscsi_proto.h> 24#include <scsi/iscsi_proto.h>
25#include <target/target_core_base.h> 25#include <target/target_core_base.h>
26#include <target/target_core_transport.h>
27#include <target/configfs_macros.h> 26#include <target/configfs_macros.h>
28 27
29#include "iscsi_target_core.h" 28#include "iscsi_target_core.h"
@@ -746,7 +745,7 @@ static ssize_t iscsi_stat_sess_show_attr_node(
746 spin_lock_bh(&se_nacl->nacl_sess_lock); 745 spin_lock_bh(&se_nacl->nacl_sess_lock);
747 se_sess = se_nacl->nacl_sess; 746 se_sess = se_nacl->nacl_sess;
748 if (se_sess) { 747 if (se_sess) {
749 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 748 sess = se_sess->fabric_sess_ptr;
750 if (sess) 749 if (sess)
751 ret = snprintf(page, PAGE_SIZE, "%u\n", 750 ret = snprintf(page, PAGE_SIZE, "%u\n",
752 sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX); 751 sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
@@ -770,7 +769,7 @@ static ssize_t iscsi_stat_sess_show_attr_indx(
770 spin_lock_bh(&se_nacl->nacl_sess_lock); 769 spin_lock_bh(&se_nacl->nacl_sess_lock);
771 se_sess = se_nacl->nacl_sess; 770 se_sess = se_nacl->nacl_sess;
772 if (se_sess) { 771 if (se_sess) {
773 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 772 sess = se_sess->fabric_sess_ptr;
774 if (sess) 773 if (sess)
775 ret = snprintf(page, PAGE_SIZE, "%u\n", 774 ret = snprintf(page, PAGE_SIZE, "%u\n",
776 sess->session_index); 775 sess->session_index);
@@ -794,7 +793,7 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
794 spin_lock_bh(&se_nacl->nacl_sess_lock); 793 spin_lock_bh(&se_nacl->nacl_sess_lock);
795 se_sess = se_nacl->nacl_sess; 794 se_sess = se_nacl->nacl_sess;
796 if (se_sess) { 795 if (se_sess) {
797 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 796 sess = se_sess->fabric_sess_ptr;
798 if (sess) 797 if (sess)
799 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); 798 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
800 } 799 }
@@ -817,7 +816,7 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
817 spin_lock_bh(&se_nacl->nacl_sess_lock); 816 spin_lock_bh(&se_nacl->nacl_sess_lock);
818 se_sess = se_nacl->nacl_sess; 817 se_sess = se_nacl->nacl_sess;
819 if (se_sess) { 818 if (se_sess) {
820 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 819 sess = se_sess->fabric_sess_ptr;
821 if (sess) 820 if (sess)
822 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); 821 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
823 } 822 }
@@ -840,7 +839,7 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
840 spin_lock_bh(&se_nacl->nacl_sess_lock); 839 spin_lock_bh(&se_nacl->nacl_sess_lock);
841 se_sess = se_nacl->nacl_sess; 840 se_sess = se_nacl->nacl_sess;
842 if (se_sess) { 841 if (se_sess) {
843 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 842 sess = se_sess->fabric_sess_ptr;
844 if (sess) 843 if (sess)
845 ret = snprintf(page, PAGE_SIZE, "%llu\n", 844 ret = snprintf(page, PAGE_SIZE, "%llu\n",
846 (unsigned long long)sess->tx_data_octets); 845 (unsigned long long)sess->tx_data_octets);
@@ -864,7 +863,7 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
864 spin_lock_bh(&se_nacl->nacl_sess_lock); 863 spin_lock_bh(&se_nacl->nacl_sess_lock);
865 se_sess = se_nacl->nacl_sess; 864 se_sess = se_nacl->nacl_sess;
866 if (se_sess) { 865 if (se_sess) {
867 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 866 sess = se_sess->fabric_sess_ptr;
868 if (sess) 867 if (sess)
869 ret = snprintf(page, PAGE_SIZE, "%llu\n", 868 ret = snprintf(page, PAGE_SIZE, "%llu\n",
870 (unsigned long long)sess->rx_data_octets); 869 (unsigned long long)sess->rx_data_octets);
@@ -888,7 +887,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
888 spin_lock_bh(&se_nacl->nacl_sess_lock); 887 spin_lock_bh(&se_nacl->nacl_sess_lock);
889 se_sess = se_nacl->nacl_sess; 888 se_sess = se_nacl->nacl_sess;
890 if (se_sess) { 889 if (se_sess) {
891 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 890 sess = se_sess->fabric_sess_ptr;
892 if (sess) 891 if (sess)
893 ret = snprintf(page, PAGE_SIZE, "%u\n", 892 ret = snprintf(page, PAGE_SIZE, "%u\n",
894 sess->conn_digest_errors); 893 sess->conn_digest_errors);
@@ -912,7 +911,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
912 spin_lock_bh(&se_nacl->nacl_sess_lock); 911 spin_lock_bh(&se_nacl->nacl_sess_lock);
913 se_sess = se_nacl->nacl_sess; 912 se_sess = se_nacl->nacl_sess;
914 if (se_sess) { 913 if (se_sess) {
915 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 914 sess = se_sess->fabric_sess_ptr;
916 if (sess) 915 if (sess)
917 ret = snprintf(page, PAGE_SIZE, "%u\n", 916 ret = snprintf(page, PAGE_SIZE, "%u\n",
918 sess->conn_timeout_errors); 917 sess->conn_timeout_errors);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 490207eacde9..255ed35da815 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -21,7 +21,7 @@
21#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22#include <scsi/iscsi_proto.h> 22#include <scsi/iscsi_proto.h>
23#include <target/target_core_base.h> 23#include <target/target_core_base.h>
24#include <target/target_core_transport.h> 24#include <target/target_core_fabric.h>
25 25
26#include "iscsi_target_core.h" 26#include "iscsi_target_core.h"
27#include "iscsi_target_seq_pdu_list.h" 27#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index d4cf2cd25c44..879d8d0fa3fe 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -19,10 +19,8 @@
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21#include <target/target_core_base.h> 21#include <target/target_core_base.h>
22#include <target/target_core_transport.h> 22#include <target/target_core_fabric.h>
23#include <target/target_core_fabric_ops.h>
24#include <target/target_core_configfs.h> 23#include <target/target_core_configfs.h>
25#include <target/target_core_tpg.h>
26 24
27#include "iscsi_target_core.h" 25#include "iscsi_target_core.h"
28#include "iscsi_target_erl0.h" 26#include "iscsi_target_erl0.h"
@@ -72,7 +70,7 @@ int iscsit_load_discovery_tpg(void)
72 70
73 ret = core_tpg_register( 71 ret = core_tpg_register(
74 &lio_target_fabric_configfs->tf_ops, 72 &lio_target_fabric_configfs->tf_ops,
75 NULL, &tpg->tpg_se_tpg, (void *)tpg, 73 NULL, &tpg->tpg_se_tpg, tpg,
76 TRANSPORT_TPG_TYPE_DISCOVERY); 74 TRANSPORT_TPG_TYPE_DISCOVERY);
77 if (ret < 0) { 75 if (ret < 0) {
78 kfree(tpg); 76 kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 02348f727bd4..a05ca1c4f01c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -22,9 +22,7 @@
22#include <scsi/scsi_tcq.h> 22#include <scsi/scsi_tcq.h>
23#include <scsi/iscsi_proto.h> 23#include <scsi/iscsi_proto.h>
24#include <target/target_core_base.h> 24#include <target/target_core_base.h>
25#include <target/target_core_transport.h> 25#include <target/target_core_fabric.h>
26#include <target/target_core_tmr.h>
27#include <target/target_core_fabric_ops.h>
28#include <target/target_core_configfs.h> 26#include <target/target_core_configfs.h>
29 27
30#include "iscsi_target_core.h" 28#include "iscsi_target_core.h"
@@ -289,7 +287,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
289 } 287 }
290 288
291 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, 289 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
292 (void *)cmd->tmr_req, tcm_function, 290 cmd->tmr_req, tcm_function,
293 GFP_KERNEL); 291 GFP_KERNEL);
294 if (!se_cmd->se_tmr_req) 292 if (!se_cmd->se_tmr_req)
295 goto out; 293 goto out;
@@ -1066,7 +1064,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
1066 if (tiqn) { 1064 if (tiqn) {
1067 spin_lock_bh(&tiqn->sess_err_stats.lock); 1065 spin_lock_bh(&tiqn->sess_err_stats.lock);
1068 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, 1066 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
1069 (void *)conn->sess->sess_ops->InitiatorName); 1067 conn->sess->sess_ops->InitiatorName);
1070 tiqn->sess_err_stats.last_sess_failure_type = 1068 tiqn->sess_err_stats.last_sess_failure_type =
1071 ISCSI_SESS_ERR_CXN_TIMEOUT; 1069 ISCSI_SESS_ERR_CXN_TIMEOUT;
1072 tiqn->sess_err_stats.cxn_timeout_errors++; 1070 tiqn->sess_err_stats.cxn_timeout_errors++;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 81d5832fbbd5..c47ff7f59e57 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -33,14 +33,9 @@
33#include <scsi/scsi_cmnd.h> 33#include <scsi/scsi_cmnd.h>
34 34
35#include <target/target_core_base.h> 35#include <target/target_core_base.h>
36#include <target/target_core_transport.h> 36#include <target/target_core_fabric.h>
37#include <target/target_core_fabric_ops.h>
38#include <target/target_core_fabric_configfs.h> 37#include <target/target_core_fabric_configfs.h>
39#include <target/target_core_fabric_lib.h>
40#include <target/target_core_configfs.h> 38#include <target/target_core_configfs.h>
41#include <target/target_core_device.h>
42#include <target/target_core_tpg.h>
43#include <target/target_core_tmr.h>
44 39
45#include "tcm_loop.h" 40#include "tcm_loop.h"
46 41
@@ -421,11 +416,11 @@ static struct scsi_host_template tcm_loop_driver_template = {
421 .queuecommand = tcm_loop_queuecommand, 416 .queuecommand = tcm_loop_queuecommand,
422 .change_queue_depth = tcm_loop_change_queue_depth, 417 .change_queue_depth = tcm_loop_change_queue_depth,
423 .eh_device_reset_handler = tcm_loop_device_reset, 418 .eh_device_reset_handler = tcm_loop_device_reset,
424 .can_queue = TL_SCSI_CAN_QUEUE, 419 .can_queue = 1024,
425 .this_id = -1, 420 .this_id = -1,
426 .sg_tablesize = TL_SCSI_SG_TABLESIZE, 421 .sg_tablesize = 256,
427 .cmd_per_lun = TL_SCSI_CMD_PER_LUN, 422 .cmd_per_lun = 1024,
428 .max_sectors = TL_SCSI_MAX_SECTORS, 423 .max_sectors = 0xFFFF,
429 .use_clustering = DISABLE_CLUSTERING, 424 .use_clustering = DISABLE_CLUSTERING,
430 .slave_alloc = tcm_loop_slave_alloc, 425 .slave_alloc = tcm_loop_slave_alloc,
431 .slave_configure = tcm_loop_slave_configure, 426 .slave_configure = tcm_loop_slave_configure,
@@ -564,8 +559,7 @@ static char *tcm_loop_get_fabric_name(void)
564 559
565static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) 560static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
566{ 561{
567 struct tcm_loop_tpg *tl_tpg = 562 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
568 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
569 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 563 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
570 /* 564 /*
571 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba() 565 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
@@ -592,8 +586,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
592 586
593static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) 587static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
594{ 588{
595 struct tcm_loop_tpg *tl_tpg = 589 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
596 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
597 /* 590 /*
598 * Return the passed NAA identifier for the SAS Target Port 591 * Return the passed NAA identifier for the SAS Target Port
599 */ 592 */
@@ -602,8 +595,7 @@ static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
602 595
603static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) 596static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
604{ 597{
605 struct tcm_loop_tpg *tl_tpg = 598 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
606 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
607 /* 599 /*
608 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 600 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
609 * to represent the SCSI Target Port. 601 * to represent the SCSI Target Port.
@@ -623,8 +615,7 @@ static u32 tcm_loop_get_pr_transport_id(
623 int *format_code, 615 int *format_code,
624 unsigned char *buf) 616 unsigned char *buf)
625{ 617{
626 struct tcm_loop_tpg *tl_tpg = 618 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
627 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
628 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 619 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
629 620
630 switch (tl_hba->tl_proto_id) { 621 switch (tl_hba->tl_proto_id) {
@@ -653,8 +644,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
653 struct t10_pr_registration *pr_reg, 644 struct t10_pr_registration *pr_reg,
654 int *format_code) 645 int *format_code)
655{ 646{
656 struct tcm_loop_tpg *tl_tpg = 647 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
657 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
658 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 648 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
659 649
660 switch (tl_hba->tl_proto_id) { 650 switch (tl_hba->tl_proto_id) {
@@ -687,8 +677,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
687 u32 *out_tid_len, 677 u32 *out_tid_len,
688 char **port_nexus_ptr) 678 char **port_nexus_ptr)
689{ 679{
690 struct tcm_loop_tpg *tl_tpg = 680 struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
691 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
692 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 681 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
693 682
694 switch (tl_hba->tl_proto_id) { 683 switch (tl_hba->tl_proto_id) {
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 6b76c7a22bb0..15a036441471 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,16 +1,7 @@
1#define TCM_LOOP_VERSION "v2.1-rc1" 1#define TCM_LOOP_VERSION "v2.1-rc1"
2#define TL_WWN_ADDR_LEN 256 2#define TL_WWN_ADDR_LEN 256
3#define TL_TPGS_PER_HBA 32 3#define TL_TPGS_PER_HBA 32
4/* 4
5 * Defaults for struct scsi_host_template tcm_loop_driver_template
6 *
7 * We use large can_queue and cmd_per_lun here and let TCM enforce
8 * the underlying se_device_t->queue_depth.
9 */
10#define TL_SCSI_CAN_QUEUE 1024
11#define TL_SCSI_CMD_PER_LUN 1024
12#define TL_SCSI_MAX_SECTORS 1024
13#define TL_SCSI_SG_TABLESIZE 256
14/* 5/*
15 * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len 6 * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
16 */ 7 */
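The TL_SCSI_* constants removed here were only used to fill in tcm_loop_driver_template, so the commit folds the values straight into the initializer shown earlier, dropping the comment that explained them (large can_queue/cmd_per_lun are safe because TCM enforces the real per-device queue depth) and raising max_sectors from 1024 to 0xFFFF in the process. A trimmed sketch of the resulting template, values as in the tcm_loop.c hunk above:

    static struct scsi_host_template tcm_loop_driver_template = {
            /* ... */
            .can_queue     = 1024,    /* upper bound; TCM enforces se_device queue_depth */
            .cmd_per_lun   = 1024,
            .sg_tablesize  = 256,
            .max_sectors   = 0xFFFF,  /* raised from the old TL_SCSI_MAX_SECTORS of 1024 */
            /* ... */
    };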
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 1dcbef499d6a..1b1edd14f4bf 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -32,13 +32,12 @@
32#include <scsi/scsi_cmnd.h> 32#include <scsi/scsi_cmnd.h>
33 33
34#include <target/target_core_base.h> 34#include <target/target_core_base.h>
35#include <target/target_core_device.h> 35#include <target/target_core_backend.h>
36#include <target/target_core_transport.h> 36#include <target/target_core_fabric.h>
37#include <target/target_core_fabric_ops.h>
38#include <target/target_core_configfs.h> 37#include <target/target_core_configfs.h>
39 38
39#include "target_core_internal.h"
40#include "target_core_alua.h" 40#include "target_core_alua.h"
41#include "target_core_hba.h"
42#include "target_core_ua.h" 41#include "target_core_ua.h"
43 42
44static int core_alua_check_transition(int state, int *primary); 43static int core_alua_check_transition(int state, int *primary);
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 831468b3163d..2f2235edefff 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -29,10 +29,11 @@
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include <target/target_core_base.h> 31#include <target/target_core_base.h>
32#include <target/target_core_transport.h> 32#include <target/target_core_backend.h>
33#include <target/target_core_fabric_ops.h> 33#include <target/target_core_fabric.h>
34
35#include "target_core_internal.h"
34#include "target_core_ua.h" 36#include "target_core_ua.h"
35#include "target_core_cdb.h"
36 37
37static void 38static void
38target_fill_alua_data(struct se_port *port, unsigned char *buf) 39target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -94,6 +95,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
94 buf[2] = dev->transport->get_device_rev(dev); 95 buf[2] = dev->transport->get_device_rev(dev);
95 96
96 /* 97 /*
98 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
99 *
100 * SPC4 says:
101 * A RESPONSE DATA FORMAT field set to 2h indicates that the
102 * standard INQUIRY data is in the format defined in this
103 * standard. Response data format values less than 2h are
104 * obsolete. Response data format values greater than 2h are
105 * reserved.
106 */
107 buf[3] = 2;
108
109 /*
97 * Enable SCCS and TPGS fields for Emulated ALUA 110 * Enable SCCS and TPGS fields for Emulated ALUA
98 */ 111 */
99 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) 112 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
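The inserted comment and buf[3] = 2 set the RESPONSE DATA FORMAT field of the standard INQUIRY data. Byte 3 packs several fields; with NORMACA and HISUP left at zero, writing the whole byte as 2 is equivalent to setting just the low nibble. Roughly, under SPC-4's layout for that byte:

    /* byte 3 of standard INQUIRY data (SPC-4):
     *   bit 5: NORMACA, bit 4: HISUP, bits 3..0: RESPONSE DATA FORMAT */
    buf[3] = (0 << 5) | (0 << 4) | 0x2;   /* RDF = 2h: current standard format */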
@@ -115,11 +128,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
115 goto out; 128 goto out;
116 } 129 }
117 130
118 snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); 131 snprintf(&buf[8], 8, "LIO-ORG");
119 snprintf((unsigned char *)&buf[16], 16, "%s", 132 snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
120 &dev->se_sub_dev->t10_wwn.model[0]); 133 snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
121 snprintf((unsigned char *)&buf[32], 4, "%s",
122 &dev->se_sub_dev->t10_wwn.revision[0]);
123 buf[4] = 31; /* Set additional length to 31 */ 134 buf[4] = 31; /* Set additional length to 31 */
124 135
125out: 136out:
@@ -138,8 +149,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
138 SDF_EMULATED_VPD_UNIT_SERIAL) { 149 SDF_EMULATED_VPD_UNIT_SERIAL) {
139 u32 unit_serial_len; 150 u32 unit_serial_len;
140 151
141 unit_serial_len = 152 unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
142 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
143 unit_serial_len++; /* For NULL Terminator */ 153 unit_serial_len++; /* For NULL Terminator */
144 154
145 if (((len + 4) + unit_serial_len) > cmd->data_length) { 155 if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -148,8 +158,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
148 buf[3] = (len & 0xff); 158 buf[3] = (len & 0xff);
149 return 0; 159 return 0;
150 } 160 }
151 len += sprintf((unsigned char *)&buf[4], "%s", 161 len += sprintf(&buf[4], "%s",
152 &dev->se_sub_dev->t10_wwn.unit_serial[0]); 162 dev->se_sub_dev->t10_wwn.unit_serial);
153 len++; /* Extra Byte for NULL Terminator */ 163 len++; /* Extra Byte for NULL Terminator */
154 buf[3] = len; 164 buf[3] = len;
155 } 165 }
@@ -279,14 +289,13 @@ check_t10_vend_desc:
279 len += (prod_len + unit_serial_len); 289 len += (prod_len + unit_serial_len);
280 goto check_port; 290 goto check_port;
281 } 291 }
282 id_len += sprintf((unsigned char *)&buf[off+12], 292 id_len += sprintf(&buf[off+12], "%s:%s", prod,
283 "%s:%s", prod,
284 &dev->se_sub_dev->t10_wwn.unit_serial[0]); 293 &dev->se_sub_dev->t10_wwn.unit_serial[0]);
285 } 294 }
286 buf[off] = 0x2; /* ASCII */ 295 buf[off] = 0x2; /* ASCII */
287 buf[off+1] = 0x1; /* T10 Vendor ID */ 296 buf[off+1] = 0x1; /* T10 Vendor ID */
288 buf[off+2] = 0x0; 297 buf[off+2] = 0x0;
289 memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8); 298 memcpy(&buf[off+4], "LIO-ORG", 8);
290 /* Extra Byte for NULL Terminator */ 299 /* Extra Byte for NULL Terminator */
291 id_len++; 300 id_len++;
292 /* Identifier Length */ 301 /* Identifier Length */
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
deleted file mode 100644
index ad6b1e393001..000000000000
--- a/drivers/target/target_core_cdb.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef TARGET_CORE_CDB_H
2#define TARGET_CORE_CDB_H
3
4int target_emulate_inquiry(struct se_task *task);
5int target_emulate_readcapacity(struct se_task *task);
6int target_emulate_readcapacity_16(struct se_task *task);
7int target_emulate_modesense(struct se_task *task);
8int target_emulate_request_sense(struct se_task *task);
9int target_emulate_unmap(struct se_task *task);
10int target_emulate_write_same(struct se_task *task);
11int target_emulate_synchronize_cache(struct se_task *task);
12int target_emulate_noop(struct se_task *task);
13
14#endif /* TARGET_CORE_CDB_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 93d4f6a1b798..0955bb8979fb 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -39,18 +39,16 @@
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40 40
41#include <target/target_core_base.h> 41#include <target/target_core_base.h>
42#include <target/target_core_device.h> 42#include <target/target_core_backend.h>
43#include <target/target_core_transport.h> 43#include <target/target_core_fabric.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_fabric_configfs.h> 44#include <target/target_core_fabric_configfs.h>
46#include <target/target_core_configfs.h> 45#include <target/target_core_configfs.h>
47#include <target/configfs_macros.h> 46#include <target/configfs_macros.h>
48 47
48#include "target_core_internal.h"
49#include "target_core_alua.h" 49#include "target_core_alua.h"
50#include "target_core_hba.h"
51#include "target_core_pr.h" 50#include "target_core_pr.h"
52#include "target_core_rd.h" 51#include "target_core_rd.h"
53#include "target_core_stat.h"
54 52
55extern struct t10_alua_lu_gp *default_lu_gp; 53extern struct t10_alua_lu_gp *default_lu_gp;
56 54
@@ -1452,7 +1450,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1452 return -ENOMEM; 1450 return -ENOMEM;
1453 1451
1454 orig = opts; 1452 orig = opts;
1455 while ((ptr = strsep(&opts, ",")) != NULL) { 1453 while ((ptr = strsep(&opts, ",\n")) != NULL) {
1456 if (!*ptr) 1454 if (!*ptr)
1457 continue; 1455 continue;
1458 1456
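Adding '\n' to the strsep() separator set here (and in the fd and iblock backend parsers further down) makes the option parser tolerate the trailing newline that echo appends when these configfs attributes are written from a shell; without it, the final token would carry the newline and fail its key/value match. A minimal sketch of the effect, assuming an echo-style write (the key names are illustrative, apart from fd_dev_name which appears in the fd backend below):

    char buf[] = "fd_dev_name=/tmp/backing.img,fd_dev_size=4194304\n";
    char *opts = buf, *ptr;

    while ((ptr = strsep(&opts, ",\n")) != NULL) {
            if (!*ptr)
                    continue;   /* skip the empty token produced after the final '\n' */
            /* ptr is now a clean "key=value" token with no trailing newline */
    }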
@@ -1631,7 +1629,7 @@ static struct config_item_type target_core_dev_pr_cit = {
1631 1629
1632static ssize_t target_core_show_dev_info(void *p, char *page) 1630static ssize_t target_core_show_dev_info(void *p, char *page)
1633{ 1631{
1634 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1632 struct se_subsystem_dev *se_dev = p;
1635 struct se_hba *hba = se_dev->se_dev_hba; 1633 struct se_hba *hba = se_dev->se_dev_hba;
1636 struct se_subsystem_api *t = hba->transport; 1634 struct se_subsystem_api *t = hba->transport;
1637 int bl = 0; 1635 int bl = 0;
@@ -1659,7 +1657,7 @@ static ssize_t target_core_store_dev_control(
1659 const char *page, 1657 const char *page,
1660 size_t count) 1658 size_t count)
1661{ 1659{
1662 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1660 struct se_subsystem_dev *se_dev = p;
1663 struct se_hba *hba = se_dev->se_dev_hba; 1661 struct se_hba *hba = se_dev->se_dev_hba;
1664 struct se_subsystem_api *t = hba->transport; 1662 struct se_subsystem_api *t = hba->transport;
1665 1663
@@ -1682,7 +1680,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
1682 1680
1683static ssize_t target_core_show_dev_alias(void *p, char *page) 1681static ssize_t target_core_show_dev_alias(void *p, char *page)
1684{ 1682{
1685 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1683 struct se_subsystem_dev *se_dev = p;
1686 1684
1687 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) 1685 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
1688 return 0; 1686 return 0;
@@ -1695,7 +1693,7 @@ static ssize_t target_core_store_dev_alias(
1695 const char *page, 1693 const char *page,
1696 size_t count) 1694 size_t count)
1697{ 1695{
1698 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1696 struct se_subsystem_dev *se_dev = p;
1699 struct se_hba *hba = se_dev->se_dev_hba; 1697 struct se_hba *hba = se_dev->se_dev_hba;
1700 ssize_t read_bytes; 1698 ssize_t read_bytes;
1701 1699
@@ -1710,6 +1708,9 @@ static ssize_t target_core_store_dev_alias(
1710 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1708 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1711 "%s", page); 1709 "%s", page);
1712 1710
1711 if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
1712 se_dev->se_dev_alias[read_bytes - 1] = '\0';
1713
1713 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1714 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
1714 config_item_name(&hba->hba_group.cg_item), 1715 config_item_name(&hba->hba_group.cg_item),
1715 config_item_name(&se_dev->se_dev_group.cg_item), 1716 config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1728,7 +1729,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
1728 1729
1729static ssize_t target_core_show_dev_udev_path(void *p, char *page) 1730static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1730{ 1731{
1731 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1732 struct se_subsystem_dev *se_dev = p;
1732 1733
1733 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) 1734 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
1734 return 0; 1735 return 0;
@@ -1741,7 +1742,7 @@ static ssize_t target_core_store_dev_udev_path(
1741 const char *page, 1742 const char *page,
1742 size_t count) 1743 size_t count)
1743{ 1744{
1744 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1745 struct se_subsystem_dev *se_dev = p;
1745 struct se_hba *hba = se_dev->se_dev_hba; 1746 struct se_hba *hba = se_dev->se_dev_hba;
1746 ssize_t read_bytes; 1747 ssize_t read_bytes;
1747 1748
@@ -1756,6 +1757,9 @@ static ssize_t target_core_store_dev_udev_path(
1756 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1757 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1757 "%s", page); 1758 "%s", page);
1758 1759
1760 if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
1761 se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
1762
1759 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1763 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1760 config_item_name(&hba->hba_group.cg_item), 1764 config_item_name(&hba->hba_group.cg_item),
1761 config_item_name(&se_dev->se_dev_group.cg_item), 1765 config_item_name(&se_dev->se_dev_group.cg_item),
@@ -1777,7 +1781,7 @@ static ssize_t target_core_store_dev_enable(
1777 const char *page, 1781 const char *page,
1778 size_t count) 1782 size_t count)
1779{ 1783{
1780 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1784 struct se_subsystem_dev *se_dev = p;
1781 struct se_device *dev; 1785 struct se_device *dev;
1782 struct se_hba *hba = se_dev->se_dev_hba; 1786 struct se_hba *hba = se_dev->se_dev_hba;
1783 struct se_subsystem_api *t = hba->transport; 1787 struct se_subsystem_api *t = hba->transport;
@@ -1822,7 +1826,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1822static ssize_t target_core_show_alua_lu_gp(void *p, char *page) 1826static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1823{ 1827{
1824 struct se_device *dev; 1828 struct se_device *dev;
1825 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; 1829 struct se_subsystem_dev *su_dev = p;
1826 struct config_item *lu_ci; 1830 struct config_item *lu_ci;
1827 struct t10_alua_lu_gp *lu_gp; 1831 struct t10_alua_lu_gp *lu_gp;
1828 struct t10_alua_lu_gp_member *lu_gp_mem; 1832 struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1860,7 +1864,7 @@ static ssize_t target_core_store_alua_lu_gp(
1860 size_t count) 1864 size_t count)
1861{ 1865{
1862 struct se_device *dev; 1866 struct se_device *dev;
1863 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; 1867 struct se_subsystem_dev *su_dev = p;
1864 struct se_hba *hba = su_dev->se_dev_hba; 1868 struct se_hba *hba = su_dev->se_dev_hba;
1865 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 1869 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1866 struct t10_alua_lu_gp_member *lu_gp_mem; 1870 struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9b8639425472..0c5992f0d946 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -42,13 +42,11 @@
42#include <scsi/scsi_device.h> 42#include <scsi/scsi_device.h>
43 43
44#include <target/target_core_base.h> 44#include <target/target_core_base.h>
45#include <target/target_core_device.h> 45#include <target/target_core_backend.h>
46#include <target/target_core_tpg.h> 46#include <target/target_core_fabric.h>
47#include <target/target_core_transport.h>
48#include <target/target_core_fabric_ops.h>
49 47
48#include "target_core_internal.h"
50#include "target_core_alua.h" 49#include "target_core_alua.h"
51#include "target_core_hba.h"
52#include "target_core_pr.h" 50#include "target_core_pr.h"
53#include "target_core_ua.h" 51#include "target_core_ua.h"
54 52
@@ -1134,8 +1132,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1134 */ 1132 */
1135int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 1133int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1136{ 1134{
1137 u32 orig_queue_depth = dev->queue_depth;
1138
1139 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1135 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1140 pr_err("dev[%p]: Unable to change SE Device TCQ while" 1136 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1141 " dev_export_obj: %d count exists\n", dev, 1137 " dev_export_obj: %d count exists\n", dev,
@@ -1169,11 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1169 } 1165 }
1170 1166
1171 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1167 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1172 if (queue_depth > orig_queue_depth)
1173 atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
1174 else if (queue_depth < orig_queue_depth)
1175 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1176
1177 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1168 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1178 dev, queue_depth); 1169 dev, queue_depth);
1179 return 0; 1170 return 0;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 09b6f8729f91..4f77cce22646 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -36,18 +36,14 @@
36#include <linux/configfs.h> 36#include <linux/configfs.h>
37 37
38#include <target/target_core_base.h> 38#include <target/target_core_base.h>
39#include <target/target_core_device.h> 39#include <target/target_core_fabric.h>
40#include <target/target_core_tpg.h>
41#include <target/target_core_transport.h>
42#include <target/target_core_fabric_ops.h>
43#include <target/target_core_fabric_configfs.h> 40#include <target/target_core_fabric_configfs.h>
44#include <target/target_core_configfs.h> 41#include <target/target_core_configfs.h>
45#include <target/configfs_macros.h> 42#include <target/configfs_macros.h>
46 43
44#include "target_core_internal.h"
47#include "target_core_alua.h" 45#include "target_core_alua.h"
48#include "target_core_hba.h"
49#include "target_core_pr.h" 46#include "target_core_pr.h"
50#include "target_core_stat.h"
51 47
52#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ 48#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
53static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ 49static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index ec4249be617e..283a36e464e6 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,13 +34,10 @@
34#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
35 35
36#include <target/target_core_base.h> 36#include <target/target_core_base.h>
37#include <target/target_core_device.h> 37#include <target/target_core_fabric.h>
38#include <target/target_core_transport.h>
39#include <target/target_core_fabric_lib.h>
40#include <target/target_core_fabric_ops.h>
41#include <target/target_core_configfs.h> 38#include <target/target_core_configfs.h>
42 39
43#include "target_core_hba.h" 40#include "target_core_internal.h"
44#include "target_core_pr.h" 41#include "target_core_pr.h"
45 42
46/* 43/*
@@ -402,7 +399,7 @@ char *iscsi_parse_pr_out_transport_id(
402 add_len = ((buf[2] >> 8) & 0xff); 399 add_len = ((buf[2] >> 8) & 0xff);
403 add_len |= (buf[3] & 0xff); 400 add_len |= (buf[3] & 0xff);
404 401
405 tid_len = strlen((char *)&buf[4]); 402 tid_len = strlen(&buf[4]);
406 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ 403 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
407 tid_len += 1; /* Add one byte for NULL terminator */ 404 tid_len += 1; /* Add one byte for NULL terminator */
408 padding = ((-tid_len) & 3); 405 padding = ((-tid_len) & 3);
@@ -423,11 +420,11 @@ char *iscsi_parse_pr_out_transport_id(
423 * format. 420 * format.
424 */ 421 */
425 if (format_code == 0x40) { 422 if (format_code == 0x40) {
426 p = strstr((char *)&buf[4], ",i,0x"); 423 p = strstr(&buf[4], ",i,0x");
427 if (!p) { 424 if (!p) {
428 pr_err("Unable to locate \",i,0x\" seperator" 425 pr_err("Unable to locate \",i,0x\" seperator"
429 " for Initiator port identifier: %s\n", 426 " for Initiator port identifier: %s\n",
430 (char *)&buf[4]); 427 &buf[4]);
431 return NULL; 428 return NULL;
432 } 429 }
433 *p = '\0'; /* Terminate iSCSI Name */ 430 *p = '\0'; /* Terminate iSCSI Name */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b4864fba4ef0..7ed58e2df791 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,8 +37,7 @@
37#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
38 38
39#include <target/target_core_base.h> 39#include <target/target_core_base.h>
40#include <target/target_core_device.h> 40#include <target/target_core_backend.h>
41#include <target/target_core_transport.h>
42 41
43#include "target_core_file.h" 42#include "target_core_file.h"
44 43
@@ -86,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
86static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) 85static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
87{ 86{
88 struct fd_dev *fd_dev; 87 struct fd_dev *fd_dev;
89 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 88 struct fd_host *fd_host = hba->hba_ptr;
90 89
91 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 90 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
92 if (!fd_dev) { 91 if (!fd_dev) {
@@ -114,8 +113,8 @@ static struct se_device *fd_create_virtdevice(
114 struct se_device *dev; 113 struct se_device *dev;
115 struct se_dev_limits dev_limits; 114 struct se_dev_limits dev_limits;
116 struct queue_limits *limits; 115 struct queue_limits *limits;
117 struct fd_dev *fd_dev = (struct fd_dev *) p; 116 struct fd_dev *fd_dev = p;
118 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 117 struct fd_host *fd_host = hba->hba_ptr;
119 mm_segment_t old_fs; 118 mm_segment_t old_fs;
120 struct file *file; 119 struct file *file;
121 struct inode *inode = NULL; 120 struct inode *inode = NULL;
@@ -240,7 +239,7 @@ fail:
240 */ 239 */
241static void fd_free_device(void *p) 240static void fd_free_device(void *p)
242{ 241{
243 struct fd_dev *fd_dev = (struct fd_dev *) p; 242 struct fd_dev *fd_dev = p;
244 243
245 if (fd_dev->fd_file) { 244 if (fd_dev->fd_file) {
246 filp_close(fd_dev->fd_file, NULL); 245 filp_close(fd_dev->fd_file, NULL);
@@ -498,7 +497,7 @@ static ssize_t fd_set_configfs_dev_params(
498 497
499 orig = opts; 498 orig = opts;
500 499
501 while ((ptr = strsep(&opts, ",")) != NULL) { 500 while ((ptr = strsep(&opts, ",\n")) != NULL) {
502 if (!*ptr) 501 if (!*ptr)
503 continue; 502 continue;
504 503
@@ -559,7 +558,7 @@ out:
559 558
560static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 559static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
561{ 560{
562 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; 561 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
563 562
564 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 563 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
565 pr_err("Missing fd_dev_name=\n"); 564 pr_err("Missing fd_dev_name=\n");
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index c68019d6c406..3dd1bd4b6f71 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -37,11 +37,10 @@
37#include <net/tcp.h> 37#include <net/tcp.h>
38 38
39#include <target/target_core_base.h> 39#include <target/target_core_base.h>
40#include <target/target_core_device.h> 40#include <target/target_core_backend.h>
41#include <target/target_core_tpg.h> 41#include <target/target_core_fabric.h>
42#include <target/target_core_transport.h>
43 42
44#include "target_core_hba.h" 43#include "target_core_internal.h"
45 44
46static LIST_HEAD(subsystem_list); 45static LIST_HEAD(subsystem_list);
47static DEFINE_MUTEX(subsystem_mutex); 46static DEFINE_MUTEX(subsystem_mutex);
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
deleted file mode 100644
index bb0fea5f730c..000000000000
--- a/drivers/target/target_core_hba.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef TARGET_CORE_HBA_H
2#define TARGET_CORE_HBA_H
3
4extern struct se_hba *core_alloc_hba(const char *, u32, u32);
5extern int core_delete_hba(struct se_hba *);
6
7#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 4aa992204438..cc8e6b58ef20 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -42,8 +42,7 @@
42#include <scsi/scsi_host.h> 42#include <scsi/scsi_host.h>
43 43
44#include <target/target_core_base.h> 44#include <target/target_core_base.h>
45#include <target/target_core_device.h> 45#include <target/target_core_backend.h>
46#include <target/target_core_transport.h>
47 46
48#include "target_core_iblock.h" 47#include "target_core_iblock.h"
49 48
@@ -391,7 +390,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
391 390
392 orig = opts; 391 orig = opts;
393 392
394 while ((ptr = strsep(&opts, ",")) != NULL) { 393 while ((ptr = strsep(&opts, ",\n")) != NULL) {
395 if (!*ptr) 394 if (!*ptr)
396 continue; 395 continue;
397 396
@@ -465,7 +464,7 @@ static ssize_t iblock_show_configfs_dev_params(
465 if (bd) { 464 if (bd) {
466 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 465 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
467 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 466 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
468 "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? 467 "" : (bd->bd_holder == ibd) ?
469 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 468 "CLAIMED: IBLOCK" : "CLAIMED: OS");
470 } else { 469 } else {
471 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 470 bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
new file mode 100644
index 000000000000..26f135e94f6e
--- /dev/null
+++ b/drivers/target/target_core_internal.h
@@ -0,0 +1,123 @@
1#ifndef TARGET_CORE_INTERNAL_H
2#define TARGET_CORE_INTERNAL_H
3
4/* target_core_alua.c */
5extern struct t10_alua_lu_gp *default_lu_gp;
6
7/* target_core_cdb.c */
8int target_emulate_inquiry(struct se_task *task);
9int target_emulate_readcapacity(struct se_task *task);
10int target_emulate_readcapacity_16(struct se_task *task);
11int target_emulate_modesense(struct se_task *task);
12int target_emulate_request_sense(struct se_task *task);
13int target_emulate_unmap(struct se_task *task);
14int target_emulate_write_same(struct se_task *task);
15int target_emulate_synchronize_cache(struct se_task *task);
16int target_emulate_noop(struct se_task *task);
17
18/* target_core_device.c */
19struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
20int core_free_device_list_for_node(struct se_node_acl *,
21 struct se_portal_group *);
22void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
23void core_update_device_list_access(u32, u32, struct se_node_acl *);
24int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
25 u32, u32, struct se_node_acl *, struct se_portal_group *, int);
26void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
27int core_dev_export(struct se_device *, struct se_portal_group *,
28 struct se_lun *);
29void core_dev_unexport(struct se_device *, struct se_portal_group *,
30 struct se_lun *);
31int target_report_luns(struct se_task *);
32void se_release_device_for_hba(struct se_device *);
33void se_release_vpd_for_dev(struct se_device *);
34int se_free_virtual_device(struct se_device *, struct se_hba *);
35int se_dev_check_online(struct se_device *);
36int se_dev_check_shutdown(struct se_device *);
37void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
38int se_dev_set_task_timeout(struct se_device *, u32);
39int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
40int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
41int se_dev_set_unmap_granularity(struct se_device *, u32);
42int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
43int se_dev_set_emulate_dpo(struct se_device *, int);
44int se_dev_set_emulate_fua_write(struct se_device *, int);
45int se_dev_set_emulate_fua_read(struct se_device *, int);
46int se_dev_set_emulate_write_cache(struct se_device *, int);
47int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
48int se_dev_set_emulate_tas(struct se_device *, int);
49int se_dev_set_emulate_tpu(struct se_device *, int);
50int se_dev_set_emulate_tpws(struct se_device *, int);
51int se_dev_set_enforce_pr_isids(struct se_device *, int);
52int se_dev_set_is_nonrot(struct se_device *, int);
53int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
54int se_dev_set_queue_depth(struct se_device *, u32);
55int se_dev_set_max_sectors(struct se_device *, u32);
56int se_dev_set_optimal_sectors(struct se_device *, u32);
57int se_dev_set_block_size(struct se_device *, u32);
58struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
59 struct se_device *, u32);
60int core_dev_del_lun(struct se_portal_group *, u32);
61struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
62struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
63 u32, char *, int *);
64int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
65 struct se_lun_acl *, u32, u32);
66int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
67 struct se_lun *, struct se_lun_acl *);
68void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
69 struct se_lun_acl *lacl);
70int core_dev_setup_virtual_lun0(void);
71void core_dev_release_virtual_lun0(void);
72
73/* target_core_hba.c */
74struct se_hba *core_alloc_hba(const char *, u32, u32);
75int core_delete_hba(struct se_hba *);
76
77/* target_core_tmr.c */
78int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
79 struct list_head *, struct se_cmd *);
80
81/* target_core_tpg.c */
82extern struct se_device *g_lun0_dev;
83
84struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
85 const char *);
86struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
87 unsigned char *);
88void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
89void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
90struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
91int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
92 u32, void *);
93struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
94int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
95
96/* target_core_transport.c */
97extern struct kmem_cache *se_tmr_req_cache;
98
99int init_se_kmem_caches(void);
100void release_se_kmem_caches(void);
101u32 scsi_get_new_index(scsi_index_t);
102void transport_subsystem_check_init(void);
103void transport_cmd_finish_abort(struct se_cmd *, int);
104void __transport_remove_task_from_execute_queue(struct se_task *,
105 struct se_device *);
106unsigned char *transport_dump_cmd_direction(struct se_cmd *);
107void transport_dump_dev_state(struct se_device *, char *, int *);
108void transport_dump_dev_info(struct se_device *, struct se_lun *,
109 unsigned long long, char *, int *);
110void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
111int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
112int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
113int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
114bool target_stop_task(struct se_task *task, unsigned long *flags);
115int transport_clear_lun_from_sessions(struct se_lun *);
116void transport_send_task_abort(struct se_cmd *);
117
118/* target_core_stat.c */
119void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
120void target_stat_setup_port_default_groups(struct se_lun *);
121void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
122
123#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 95dee7074aeb..429ad7291664 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -33,14 +33,11 @@
33#include <asm/unaligned.h> 33#include <asm/unaligned.h>
34 34
35#include <target/target_core_base.h> 35#include <target/target_core_base.h>
36#include <target/target_core_device.h> 36#include <target/target_core_backend.h>
37#include <target/target_core_tmr.h> 37#include <target/target_core_fabric.h>
38#include <target/target_core_tpg.h>
39#include <target/target_core_transport.h>
40#include <target/target_core_fabric_ops.h>
41#include <target/target_core_configfs.h> 38#include <target/target_core_configfs.h>
42 39
43#include "target_core_hba.h" 40#include "target_core_internal.h"
44#include "target_core_pr.h" 41#include "target_core_pr.h"
45#include "target_core_ua.h" 42#include "target_core_ua.h"
46 43
@@ -2984,21 +2981,6 @@ static void core_scsi3_release_preempt_and_abort(
2984 } 2981 }
2985} 2982}
2986 2983
2987int core_scsi3_check_cdb_abort_and_preempt(
2988 struct list_head *preempt_and_abort_list,
2989 struct se_cmd *cmd)
2990{
2991 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2992
2993 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2994 pr_reg_abort_list) {
2995 if (pr_reg->pr_res_key == cmd->pr_res_key)
2996 return 0;
2997 }
2998
2999 return 1;
3000}
3001
3002static int core_scsi3_pro_preempt( 2984static int core_scsi3_pro_preempt(
3003 struct se_cmd *cmd, 2985 struct se_cmd *cmd,
3004 int type, 2986 int type,
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index b97f6940dd05..7a233feb7e99 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,8 +60,6 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
60 struct se_node_acl *); 60 struct se_node_acl *);
61extern void core_scsi3_free_all_registrations(struct se_device *); 61extern void core_scsi3_free_all_registrations(struct se_device *);
62extern unsigned char *core_scsi3_pr_dump_type(int); 62extern unsigned char *core_scsi3_pr_dump_type(int);
63extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
64 struct se_cmd *);
65 63
66extern int target_scsi3_emulate_pr_in(struct se_task *task); 64extern int target_scsi3_emulate_pr_in(struct se_task *task);
67extern int target_scsi3_emulate_pr_out(struct se_task *task); 65extern int target_scsi3_emulate_pr_out(struct se_task *task);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 8b15e56b0384..d35467d42e12 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,8 +44,7 @@
44#include <scsi/scsi_tcq.h> 44#include <scsi/scsi_tcq.h>
45 45
46#include <target/target_core_base.h> 46#include <target/target_core_base.h>
47#include <target/target_core_device.h> 47#include <target/target_core_backend.h>
48#include <target/target_core_transport.h>
49 48
50#include "target_core_pscsi.h" 49#include "target_core_pscsi.h"
51 50
@@ -105,7 +104,7 @@ static void pscsi_detach_hba(struct se_hba *hba)
105 104
106static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) 105static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
107{ 106{
108 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; 107 struct pscsi_hba_virt *phv = hba->hba_ptr;
109 struct Scsi_Host *sh = phv->phv_lld_host; 108 struct Scsi_Host *sh = phv->phv_lld_host;
110 /* 109 /*
111 * Release the struct Scsi_Host 110 * Release the struct Scsi_Host
@@ -351,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
351 * scsi_device_put() and the pdv->pdv_sd cleared. 350 * scsi_device_put() and the pdv->pdv_sd cleared.
352 */ 351 */
353 pdv->pdv_sd = sd; 352 pdv->pdv_sd = sd;
354
355 dev = transport_add_device_to_core_hba(hba, &pscsi_template, 353 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
356 se_dev, dev_flags, pdv, 354 se_dev, dev_flags, pdv,
357 &dev_limits, NULL, NULL); 355 &dev_limits, NULL, NULL);
@@ -406,7 +404,7 @@ static struct se_device *pscsi_create_type_disk(
406 __releases(sh->host_lock) 404 __releases(sh->host_lock)
407{ 405{
408 struct se_device *dev; 406 struct se_device *dev;
409 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 407 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
410 struct Scsi_Host *sh = sd->host; 408 struct Scsi_Host *sh = sd->host;
411 struct block_device *bd; 409 struct block_device *bd;
412 u32 dev_flags = 0; 410 u32 dev_flags = 0;
@@ -454,7 +452,7 @@ static struct se_device *pscsi_create_type_rom(
454 __releases(sh->host_lock) 452 __releases(sh->host_lock)
455{ 453{
456 struct se_device *dev; 454 struct se_device *dev;
457 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 455 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
458 struct Scsi_Host *sh = sd->host; 456 struct Scsi_Host *sh = sd->host;
459 u32 dev_flags = 0; 457 u32 dev_flags = 0;
460 458
@@ -489,7 +487,7 @@ static struct se_device *pscsi_create_type_other(
489 __releases(sh->host_lock) 487 __releases(sh->host_lock)
490{ 488{
491 struct se_device *dev; 489 struct se_device *dev;
492 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 490 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
493 struct Scsi_Host *sh = sd->host; 491 struct Scsi_Host *sh = sd->host;
494 u32 dev_flags = 0; 492 u32 dev_flags = 0;
495 493
@@ -510,10 +508,10 @@ static struct se_device *pscsi_create_virtdevice(
510 struct se_subsystem_dev *se_dev, 508 struct se_subsystem_dev *se_dev,
511 void *p) 509 void *p)
512{ 510{
513 struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; 511 struct pscsi_dev_virt *pdv = p;
514 struct se_device *dev; 512 struct se_device *dev;
515 struct scsi_device *sd; 513 struct scsi_device *sd;
516 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; 514 struct pscsi_hba_virt *phv = hba->hba_ptr;
517 struct Scsi_Host *sh = phv->phv_lld_host; 515 struct Scsi_Host *sh = phv->phv_lld_host;
518 int legacy_mode_enable = 0; 516 int legacy_mode_enable = 0;
519 517
@@ -818,7 +816,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
818 816
819 orig = opts; 817 orig = opts;
820 818
821 while ((ptr = strsep(&opts, ",")) != NULL) { 819 while ((ptr = strsep(&opts, ",\n")) != NULL) {
822 if (!*ptr) 820 if (!*ptr)
823 continue; 821 continue;
824 822
@@ -1144,7 +1142,7 @@ static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
1144{ 1142{
1145 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1143 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1146 1144
1147 return (unsigned char *)&pt->pscsi_sense[0]; 1145 return pt->pscsi_sense;
1148} 1146}
1149 1147
1150/* pscsi_get_device_rev(): 1148/* pscsi_get_device_rev():
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 02e51faa2f4e..8b68f7b82631 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -37,9 +37,7 @@
37#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
38 38
39#include <target/target_core_base.h> 39#include <target/target_core_base.h>
40#include <target/target_core_device.h> 40#include <target/target_core_backend.h>
41#include <target/target_core_transport.h>
42#include <target/target_core_fabric_ops.h>
43 41
44#include "target_core_rd.h" 42#include "target_core_rd.h"
45 43
@@ -474,7 +472,7 @@ static ssize_t rd_set_configfs_dev_params(
474 472
475 orig = opts; 473 orig = opts;
476 474
477 while ((ptr = strsep(&opts, ",")) != NULL) { 475 while ((ptr = strsep(&opts, ",\n")) != NULL) {
478 if (!*ptr) 476 if (!*ptr)
479 continue; 477 continue;
480 478
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 874152aed94a..f8c2d2cc3431 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -43,12 +43,12 @@
43#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
44 44
45#include <target/target_core_base.h> 45#include <target/target_core_base.h>
46#include <target/target_core_transport.h> 46#include <target/target_core_backend.h>
47#include <target/target_core_fabric_ops.h> 47#include <target/target_core_fabric.h>
48#include <target/target_core_configfs.h> 48#include <target/target_core_configfs.h>
49#include <target/configfs_macros.h> 49#include <target/configfs_macros.h>
50 50
51#include "target_core_hba.h" 51#include "target_core_internal.h"
52 52
53#ifndef INITIAL_JIFFIES 53#ifndef INITIAL_JIFFIES
54#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) 54#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -1755,8 +1755,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
1755 /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ 1755 /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
1756 memset(buf, 0, 64); 1756 memset(buf, 0, 64);
1757 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) 1757 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
1758 tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 1758 tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
1759 (unsigned char *)&buf[0], 64);
1760 1759
1761 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); 1760 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
1762 spin_unlock_irq(&nacl->nacl_sess_lock); 1761 spin_unlock_irq(&nacl->nacl_sess_lock);
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
deleted file mode 100644
index 86c252f9ea47..000000000000
--- a/drivers/target/target_core_stat.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef TARGET_CORE_STAT_H
2#define TARGET_CORE_STAT_H
3
4extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
5extern void target_stat_setup_port_default_groups(struct se_lun *);
6extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
7
8#endif /*** TARGET_CORE_STAT_H ***/
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 684522805a1f..dcb0618c9388 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -32,12 +32,11 @@
32#include <scsi/scsi_cmnd.h> 32#include <scsi/scsi_cmnd.h>
33 33
34#include <target/target_core_base.h> 34#include <target/target_core_base.h>
35#include <target/target_core_device.h> 35#include <target/target_core_backend.h>
36#include <target/target_core_tmr.h> 36#include <target/target_core_fabric.h>
37#include <target/target_core_transport.h>
38#include <target/target_core_fabric_ops.h>
39#include <target/target_core_configfs.h> 37#include <target/target_core_configfs.h>
40 38
39#include "target_core_internal.h"
41#include "target_core_alua.h" 40#include "target_core_alua.h"
42#include "target_core_pr.h" 41#include "target_core_pr.h"
43 42
@@ -101,6 +100,21 @@ static void core_tmr_handle_tas_abort(
101 transport_cmd_finish_abort(cmd, 0); 100 transport_cmd_finish_abort(cmd, 0);
102} 101}
103 102
103static int target_check_cdb_and_preempt(struct list_head *list,
104 struct se_cmd *cmd)
105{
106 struct t10_pr_registration *reg;
107
108 if (!list)
109 return 0;
110 list_for_each_entry(reg, list, pr_reg_abort_list) {
111 if (reg->pr_res_key == cmd->pr_res_key)
112 return 0;
113 }
114
115 return 1;
116}
117
104static void core_tmr_drain_tmr_list( 118static void core_tmr_drain_tmr_list(
105 struct se_device *dev, 119 struct se_device *dev,
106 struct se_tmr_req *tmr, 120 struct se_tmr_req *tmr,
@@ -132,9 +146,7 @@ static void core_tmr_drain_tmr_list(
132 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action 146 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
133 * skip non regisration key matching TMRs. 147 * skip non regisration key matching TMRs.
134 */ 148 */
135 if (preempt_and_abort_list && 149 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
136 (core_scsi3_check_cdb_abort_and_preempt(
137 preempt_and_abort_list, cmd) != 0))
138 continue; 150 continue;
139 151
140 spin_lock(&cmd->t_state_lock); 152 spin_lock(&cmd->t_state_lock);
@@ -211,9 +223,7 @@ static void core_tmr_drain_task_list(
211 * For PREEMPT_AND_ABORT usage, only process commands 223 * For PREEMPT_AND_ABORT usage, only process commands
212 * with a matching reservation key. 224 * with a matching reservation key.
213 */ 225 */
214 if (preempt_and_abort_list && 226 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
215 (core_scsi3_check_cdb_abort_and_preempt(
216 preempt_and_abort_list, cmd) != 0))
217 continue; 227 continue;
218 /* 228 /*
219 * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 229 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
@@ -222,7 +232,7 @@ static void core_tmr_drain_task_list(
222 continue; 232 continue;
223 233
224 list_move_tail(&task->t_state_list, &drain_task_list); 234 list_move_tail(&task->t_state_list, &drain_task_list);
225 atomic_set(&task->task_state_active, 0); 235 task->t_state_active = false;
226 /* 236 /*
227 * Remove from task execute list before processing drain_task_list 237 * Remove from task execute list before processing drain_task_list
228 */ 238 */
@@ -321,9 +331,7 @@ static void core_tmr_drain_cmd_list(
321 * For PREEMPT_AND_ABORT usage, only process commands 331 * For PREEMPT_AND_ABORT usage, only process commands
322 * with a matching reservation key. 332 * with a matching reservation key.
323 */ 333 */
324 if (preempt_and_abort_list && 334 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
325 (core_scsi3_check_cdb_abort_and_preempt(
326 preempt_and_abort_list, cmd) != 0))
327 continue; 335 continue;
328 /* 336 /*
329 * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 337 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
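
target_core_tmr.c now carries its own target_check_cdb_and_preempt() helper, which folds the "no PREEMPT_AND_ABORT list means process everything" case into the filter itself, so the three drain loops above no longer need the two-part preempt_and_abort_list && core_scsi3_check_cdb_abort_and_preempt(...) test. A stand-alone sketch of the same "NULL filter matches all" idiom, using an ordinary singly linked list instead of the kernel's list_head (names are illustrative only):

#include <stdio.h>
#include <stddef.h>

struct reg {
	unsigned long long key;
	struct reg *next;
};

/* Return 0 ("keep") when there is no filter list or the key is on it,
 * 1 ("skip") otherwise -- the same contract as the kernel helper. */
static int check_key(const struct reg *filter, unsigned long long key)
{
	if (!filter)
		return 0;
	for (; filter; filter = filter->next)
		if (filter->key == key)
			return 0;
	return 1;
}

int main(void)
{
	struct reg b = { .key = 0xbeef, .next = NULL };
	struct reg a = { .key = 0xcafe, .next = &b };

	printf("no filter, 0x1234 -> %d\n", check_key(NULL, 0x1234)); /* keep */
	printf("filter, 0xcafe    -> %d\n", check_key(&a, 0xcafe));   /* keep */
	printf("filter, 0x1234    -> %d\n", check_key(&a, 0x1234));   /* skip */
	return 0;
}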
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 8ddd133025b9..b7668029bb31 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -39,13 +39,10 @@
39#include <scsi/scsi_cmnd.h> 39#include <scsi/scsi_cmnd.h>
40 40
41#include <target/target_core_base.h> 41#include <target/target_core_base.h>
42#include <target/target_core_device.h> 42#include <target/target_core_backend.h>
43#include <target/target_core_tpg.h> 43#include <target/target_core_fabric.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46 44
47#include "target_core_hba.h" 45#include "target_core_internal.h"
48#include "target_core_stat.h"
49 46
50extern struct se_device *g_lun0_dev; 47extern struct se_device *g_lun0_dev;
51 48
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0257658e2e3e..d3ddd1361949 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -45,16 +45,12 @@
45#include <scsi/scsi_tcq.h> 45#include <scsi/scsi_tcq.h>
46 46
47#include <target/target_core_base.h> 47#include <target/target_core_base.h>
48#include <target/target_core_device.h> 48#include <target/target_core_backend.h>
49#include <target/target_core_tmr.h> 49#include <target/target_core_fabric.h>
50#include <target/target_core_tpg.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_configfs.h> 50#include <target/target_core_configfs.h>
54 51
52#include "target_core_internal.h"
55#include "target_core_alua.h" 53#include "target_core_alua.h"
56#include "target_core_cdb.h"
57#include "target_core_hba.h"
58#include "target_core_pr.h" 54#include "target_core_pr.h"
59#include "target_core_ua.h" 55#include "target_core_ua.h"
60 56
@@ -72,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
72 68
73static int transport_generic_write_pending(struct se_cmd *); 69static int transport_generic_write_pending(struct se_cmd *);
74static int transport_processing_thread(void *param); 70static int transport_processing_thread(void *param);
75static int __transport_execute_tasks(struct se_device *dev); 71static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
76static void transport_complete_task_attr(struct se_cmd *cmd); 72static void transport_complete_task_attr(struct se_cmd *cmd);
77static void transport_handle_queue_full(struct se_cmd *cmd, 73static void transport_handle_queue_full(struct se_cmd *cmd,
78 struct se_device *dev); 74 struct se_device *dev);
@@ -212,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type)
212 return new_index; 208 return new_index;
213} 209}
214 210
215void transport_init_queue_obj(struct se_queue_obj *qobj) 211static void transport_init_queue_obj(struct se_queue_obj *qobj)
216{ 212{
217 atomic_set(&qobj->queue_cnt, 0); 213 atomic_set(&qobj->queue_cnt, 0);
218 INIT_LIST_HEAD(&qobj->qobj_list); 214 INIT_LIST_HEAD(&qobj->qobj_list);
219 init_waitqueue_head(&qobj->thread_wq); 215 init_waitqueue_head(&qobj->thread_wq);
220 spin_lock_init(&qobj->cmd_queue_lock); 216 spin_lock_init(&qobj->cmd_queue_lock);
221} 217}
222EXPORT_SYMBOL(transport_init_queue_obj);
223 218
224void transport_subsystem_check_init(void) 219void transport_subsystem_check_init(void)
225{ 220{
@@ -426,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
426 if (task->task_flags & TF_ACTIVE) 421 if (task->task_flags & TF_ACTIVE)
427 continue; 422 continue;
428 423
429 if (!atomic_read(&task->task_state_active))
430 continue;
431
432 spin_lock_irqsave(&dev->execute_task_lock, flags); 424 spin_lock_irqsave(&dev->execute_task_lock, flags);
433 list_del(&task->t_state_list); 425 if (task->t_state_active) {
434 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", 426 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
435 cmd->se_tfo->get_task_tag(cmd), dev, task); 427 cmd->se_tfo->get_task_tag(cmd), dev, task);
436 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
437 428
438 atomic_set(&task->task_state_active, 0); 429 list_del(&task->t_state_list);
439 atomic_dec(&cmd->t_task_cdbs_ex_left); 430 atomic_dec(&cmd->t_task_cdbs_ex_left);
431 task->t_state_active = false;
432 }
433 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
440 } 434 }
435
441} 436}
442 437
443/* transport_cmd_check_stop(): 438/* transport_cmd_check_stop():
@@ -696,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
696 struct se_cmd *cmd = task->task_se_cmd; 691 struct se_cmd *cmd = task->task_se_cmd;
697 struct se_device *dev = cmd->se_dev; 692 struct se_device *dev = cmd->se_dev;
698 unsigned long flags; 693 unsigned long flags;
699#if 0
700 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
701 cmd->t_task_cdb[0], dev);
702#endif
703 if (dev)
704 atomic_inc(&dev->depth_left);
705 694
706 spin_lock_irqsave(&cmd->t_state_lock, flags); 695 spin_lock_irqsave(&cmd->t_state_lock, flags);
707 task->task_flags &= ~TF_ACTIVE; 696 task->task_flags &= ~TF_ACTIVE;
@@ -714,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success)
714 if (dev && dev->transport->transport_complete) { 703 if (dev && dev->transport->transport_complete) {
715 if (dev->transport->transport_complete(task) != 0) { 704 if (dev->transport->transport_complete(task) != 0) {
716 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 705 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
717 task->task_sense = 1; 706 task->task_flags |= TF_HAS_SENSE;
718 success = 1; 707 success = 1;
719 } 708 }
720 } 709 }
@@ -743,13 +732,7 @@ void transport_complete_task(struct se_task *task, int success)
743 } 732 }
744 733
745 if (cmd->t_tasks_failed) { 734 if (cmd->t_tasks_failed) {
746 if (!task->task_error_status) { 735 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
747 task->task_error_status =
748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
749 cmd->scsi_sense_reason =
750 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
751 }
752
753 INIT_WORK(&cmd->work, target_complete_failure_work); 736 INIT_WORK(&cmd->work, target_complete_failure_work);
754 } else { 737 } else {
755 atomic_set(&cmd->t_transport_complete, 1); 738 atomic_set(&cmd->t_transport_complete, 1);
@@ -824,7 +807,7 @@ static void __transport_add_task_to_execute_queue(
824 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); 807 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
825 atomic_inc(&dev->execute_tasks); 808 atomic_inc(&dev->execute_tasks);
826 809
827 if (atomic_read(&task->task_state_active)) 810 if (task->t_state_active)
828 return; 811 return;
829 /* 812 /*
830 * Determine if this task needs to go to HEAD_OF_QUEUE for the 813 * Determine if this task needs to go to HEAD_OF_QUEUE for the
@@ -838,7 +821,7 @@ static void __transport_add_task_to_execute_queue(
838 else 821 else
839 list_add_tail(&task->t_state_list, &dev->state_task_list); 822 list_add_tail(&task->t_state_list, &dev->state_task_list);
840 823
841 atomic_set(&task->task_state_active, 1); 824 task->t_state_active = true;
842 825
843 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 826 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
844 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 827 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
@@ -853,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
853 836
854 spin_lock_irqsave(&cmd->t_state_lock, flags); 837 spin_lock_irqsave(&cmd->t_state_lock, flags);
855 list_for_each_entry(task, &cmd->t_task_list, t_list) { 838 list_for_each_entry(task, &cmd->t_task_list, t_list) {
856 if (atomic_read(&task->task_state_active))
857 continue;
858
859 spin_lock(&dev->execute_task_lock); 839 spin_lock(&dev->execute_task_lock);
860 list_add_tail(&task->t_state_list, &dev->state_task_list); 840 if (!task->t_state_active) {
861 atomic_set(&task->task_state_active, 1); 841 list_add_tail(&task->t_state_list,
862 842 &dev->state_task_list);
863 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 843 task->t_state_active = true;
864 task->task_se_cmd->se_tfo->get_task_tag( 844
865 task->task_se_cmd), task, dev); 845 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
866 846 task->task_se_cmd->se_tfo->get_task_tag(
847 task->task_se_cmd), task, dev);
848 }
867 spin_unlock(&dev->execute_task_lock); 849 spin_unlock(&dev->execute_task_lock);
868 } 850 }
869 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 851 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
870} 852}
871 853
872static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 854static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
873{ 855{
874 struct se_device *dev = cmd->se_dev; 856 struct se_device *dev = cmd->se_dev;
875 struct se_task *task, *task_prev = NULL; 857 struct se_task *task, *task_prev = NULL;
876 unsigned long flags;
877 858
878 spin_lock_irqsave(&dev->execute_task_lock, flags);
879 list_for_each_entry(task, &cmd->t_task_list, t_list) { 859 list_for_each_entry(task, &cmd->t_task_list, t_list) {
880 if (!list_empty(&task->t_execute_list)) 860 if (!list_empty(&task->t_execute_list))
881 continue; 861 continue;
@@ -886,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
886 __transport_add_task_to_execute_queue(task, task_prev, dev); 866 __transport_add_task_to_execute_queue(task, task_prev, dev);
887 task_prev = task; 867 task_prev = task;
888 } 868 }
869}
870
871static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
872{
873 unsigned long flags;
874 struct se_device *dev = cmd->se_dev;
875
876 spin_lock_irqsave(&dev->execute_task_lock, flags);
877 __transport_add_tasks_from_cmd(cmd);
889 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 878 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
890} 879}
891 880
@@ -896,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task,
896 atomic_dec(&dev->execute_tasks); 885 atomic_dec(&dev->execute_tasks);
897} 886}
898 887
899void transport_remove_task_from_execute_queue( 888static void transport_remove_task_from_execute_queue(
900 struct se_task *task, 889 struct se_task *task,
901 struct se_device *dev) 890 struct se_device *dev)
902{ 891{
@@ -983,9 +972,8 @@ void transport_dump_dev_state(
983 break; 972 break;
984 } 973 }
985 974
986 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", 975 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
987 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), 976 atomic_read(&dev->execute_tasks), dev->queue_depth);
988 dev->queue_depth);
989 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", 977 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
990 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); 978 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
991 *bl += sprintf(b + *bl, " "); 979 *bl += sprintf(b + *bl, " ");
@@ -1340,9 +1328,6 @@ struct se_device *transport_add_device_to_core_hba(
1340 spin_lock_init(&dev->se_port_lock); 1328 spin_lock_init(&dev->se_port_lock);
1341 spin_lock_init(&dev->se_tmr_lock); 1329 spin_lock_init(&dev->se_tmr_lock);
1342 spin_lock_init(&dev->qf_cmd_lock); 1330 spin_lock_init(&dev->qf_cmd_lock);
1343
1344 dev->queue_depth = dev_limits->queue_depth;
1345 atomic_set(&dev->depth_left, dev->queue_depth);
1346 atomic_set(&dev->dev_ordered_id, 0); 1331 atomic_set(&dev->dev_ordered_id, 0);
1347 1332
1348 se_dev_set_default_attribs(dev, dev_limits); 1333 se_dev_set_default_attribs(dev, dev_limits);
@@ -1654,6 +1639,80 @@ int transport_handle_cdb_direct(
1654} 1639}
1655EXPORT_SYMBOL(transport_handle_cdb_direct); 1640EXPORT_SYMBOL(transport_handle_cdb_direct);
1656 1641
1642/**
1643 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1644 *
1645 * @se_cmd: command descriptor to submit
1646 * @se_sess: associated se_sess for endpoint
1647 * @cdb: pointer to SCSI CDB
1648 * @sense: pointer to SCSI sense buffer
1649 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1650 * @data_length: fabric expected data transfer length
1651 * @task_addr: SAM task attribute
1652 * @data_dir: DMA data direction
1653 * @flags: flags for command submission from target_sc_flags_tables
1654 *
1655 * This may only be called from process context, and also currently
1656 * assumes internal allocation of fabric payload buffer by target-core.
1657 **/
1658int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1659 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1660 u32 data_length, int task_attr, int data_dir, int flags)
1661{
1662 struct se_portal_group *se_tpg;
1663 int rc;
1664
1665 se_tpg = se_sess->se_tpg;
1666 BUG_ON(!se_tpg);
1667 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1668 BUG_ON(in_interrupt());
1669 /*
1670 * Initialize se_cmd for target operation. From this point
1671 * exceptions are handled by sending exception status via
1672 * target_core_fabric_ops->queue_status() callback
1673 */
1674 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1675 data_length, data_dir, task_attr, sense);
1676 /*
1677 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1678 * se_sess->sess_cmd_list. A second kref_get here is necessary
1679 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1680 * kref_put() to happen during fabric packet acknowledgement.
1681 */
1682 target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1683 /*
1684 * Signal bidirectional data payloads to target-core
1685 */
1686 if (flags & TARGET_SCF_BIDI_OP)
1687 se_cmd->se_cmd_flags |= SCF_BIDI;
1688 /*
1689 * Locate se_lun pointer and attach it to struct se_cmd
1690 */
1691 if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
1692 goto out_check_cond;
1693 /*
1694 * Sanitize CDBs via transport_generic_cmd_sequencer() and
1695 * allocate the necessary tasks to complete the received CDB+data
1696 */
1697 rc = transport_generic_allocate_tasks(se_cmd, cdb);
1698 if (rc != 0)
1699 goto out_check_cond;
1700 /*
1701 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1702 * for immediate execution of READs, otherwise wait for
1703 * transport_generic_handle_data() to be called for WRITEs
1704 * when fabric has filled the incoming buffer.
1705 */
1706 transport_handle_cdb_direct(se_cmd);
1707 return 0;
1708
1709out_check_cond:
1710 transport_send_check_condition_and_sense(se_cmd,
1711 se_cmd->scsi_sense_reason, 0);
1712 return 0;
1713}
1714EXPORT_SYMBOL(target_submit_cmd);
1715
1657/* 1716/*
1658 * Used by fabric module frontends defining a TFO->new_cmd_map() caller 1717 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1659 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to 1718 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
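
target_submit_cmd() gives fabric modules a single entry point that initializes the se_cmd, takes the session command reference, resolves the LUN and kicks off CDB processing, with failures reported through the ->queue_status() path. A hedged sketch of how a fabric receive path might call it, using the signature added above; the my_fabric_* structure, its fields and the choice of task attribute are placeholders for this sketch, not code from the commit:

/* Sketch only: assumes a fabric driver's own request structure that
 * already carries an se_cmd, an se_session, the CDB and a sense buffer. */
static void my_fabric_handle_scsi_cmd(struct my_fabric_req *req)
{
	int flags = TARGET_SCF_ACK_KREF;	/* fabric will ack and put later */

	if (req->bidi)
		flags |= TARGET_SCF_BIDI_OP;	/* bidirectional payload */

	/* Exceptions come back via ->queue_status(), so the return value
	 * can be ignored here just as inside the new helper itself. */
	target_submit_cmd(&req->se_cmd, req->sess->se_sess, req->cdb,
			  req->sense_buf, req->unpacked_lun, req->data_len,
			  MSG_SIMPLE_TAG, req->data_dir, flags);
}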
@@ -1920,18 +1979,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1920 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1979 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1921} 1980}
1922 1981
1923static inline int transport_tcq_window_closed(struct se_device *dev)
1924{
1925 if (dev->dev_tcq_window_closed++ <
1926 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1927 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1928 } else
1929 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1930
1931 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1932 return 0;
1933}
1934
1935/* 1982/*
1936 * Called from Fabric Module context from transport_execute_tasks() 1983 * Called from Fabric Module context from transport_execute_tasks()
1937 * 1984 *
@@ -2014,13 +2061,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2014static int transport_execute_tasks(struct se_cmd *cmd) 2061static int transport_execute_tasks(struct se_cmd *cmd)
2015{ 2062{
2016 int add_tasks; 2063 int add_tasks;
2017 2064 struct se_device *se_dev = cmd->se_dev;
2018 if (se_dev_check_online(cmd->se_dev) != 0) {
2019 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2020 transport_generic_request_failure(cmd);
2021 return 0;
2022 }
2023
2024 /* 2065 /*
2025 * Call transport_cmd_check_stop() to see if a fabric exception 2066 * Call transport_cmd_check_stop() to see if a fabric exception
2026 * has occurred that prevents execution. 2067 * has occurred that prevents execution.
@@ -2034,19 +2075,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
2034 if (!add_tasks) 2075 if (!add_tasks)
2035 goto execute_tasks; 2076 goto execute_tasks;
2036 /* 2077 /*
2037 * This calls transport_add_tasks_from_cmd() to handle 2078 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
2038 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation 2079 * adds associated se_tasks while holding dev->execute_task_lock
2039 * (if enabled) in __transport_add_task_to_execute_queue() and 2080 * before I/O dispath to avoid a double spinlock access.
2040 * transport_add_task_check_sam_attr().
2041 */ 2081 */
2042 transport_add_tasks_from_cmd(cmd); 2082 __transport_execute_tasks(se_dev, cmd);
2083 return 0;
2043 } 2084 }
2044 /* 2085
2045 * Kick the execution queue for the cmd associated struct se_device
2046 * storage object.
2047 */
2048execute_tasks: 2086execute_tasks:
2049 __transport_execute_tasks(cmd->se_dev); 2087 __transport_execute_tasks(se_dev, NULL);
2050 return 0; 2088 return 0;
2051} 2089}
2052 2090
@@ -2056,24 +2094,18 @@ execute_tasks:
2056 * 2094 *
2057 * Called from transport_processing_thread() 2095 * Called from transport_processing_thread()
2058 */ 2096 */
2059static int __transport_execute_tasks(struct se_device *dev) 2097static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
2060{ 2098{
2061 int error; 2099 int error;
2062 struct se_cmd *cmd = NULL; 2100 struct se_cmd *cmd = NULL;
2063 struct se_task *task = NULL; 2101 struct se_task *task = NULL;
2064 unsigned long flags; 2102 unsigned long flags;
2065 2103
2066 /*
2067 * Check if there is enough room in the device and HBA queue to send
2068 * struct se_tasks to the selected transport.
2069 */
2070check_depth: 2104check_depth:
2071 if (!atomic_read(&dev->depth_left))
2072 return transport_tcq_window_closed(dev);
2073
2074 dev->dev_tcq_window_closed = 0;
2075
2076 spin_lock_irq(&dev->execute_task_lock); 2105 spin_lock_irq(&dev->execute_task_lock);
2106 if (new_cmd != NULL)
2107 __transport_add_tasks_from_cmd(new_cmd);
2108
2077 if (list_empty(&dev->execute_task_list)) { 2109 if (list_empty(&dev->execute_task_list)) {
2078 spin_unlock_irq(&dev->execute_task_lock); 2110 spin_unlock_irq(&dev->execute_task_lock);
2079 return 0; 2111 return 0;
@@ -2083,10 +2115,7 @@ check_depth:
2083 __transport_remove_task_from_execute_queue(task, dev); 2115 __transport_remove_task_from_execute_queue(task, dev);
2084 spin_unlock_irq(&dev->execute_task_lock); 2116 spin_unlock_irq(&dev->execute_task_lock);
2085 2117
2086 atomic_dec(&dev->depth_left);
2087
2088 cmd = task->task_se_cmd; 2118 cmd = task->task_se_cmd;
2089
2090 spin_lock_irqsave(&cmd->t_state_lock, flags); 2119 spin_lock_irqsave(&cmd->t_state_lock, flags);
2091 task->task_flags |= (TF_ACTIVE | TF_SENT); 2120 task->task_flags |= (TF_ACTIVE | TF_SENT);
2092 atomic_inc(&cmd->t_task_cdbs_sent); 2121 atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2107,10 +2136,10 @@ check_depth:
2107 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2136 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2108 atomic_set(&cmd->t_transport_sent, 0); 2137 atomic_set(&cmd->t_transport_sent, 0);
2109 transport_stop_tasks_for_cmd(cmd); 2138 transport_stop_tasks_for_cmd(cmd);
2110 atomic_inc(&dev->depth_left);
2111 transport_generic_request_failure(cmd); 2139 transport_generic_request_failure(cmd);
2112 } 2140 }
2113 2141
2142 new_cmd = NULL;
2114 goto check_depth; 2143 goto check_depth;
2115 2144
2116 return 0; 2145 return 0;
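
With depth_left and the TCQ-window throttling gone, transport_execute_tasks() now hands the command straight to __transport_execute_tasks(), which adds the new command's tasks while it already holds dev->execute_task_lock and then drains the execute list, rather than taking the same lock twice. A compact pthread model of that "enqueue under the drain lock, then drain" shape (all names invented for the sketch; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

#define QMAX 16

static pthread_mutex_t execute_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[QMAX];
static int qlen;

/* Drain the queue; if new_item is non-negative, add it first while the
 * lock is already held -- one lock round-trip instead of two. */
static void execute_tasks(int new_item)
{
	for (;;) {
		int item;

		pthread_mutex_lock(&execute_lock);
		if (new_item >= 0 && qlen < QMAX) {
			queue[qlen++] = new_item;
			new_item = -1;
		}
		if (qlen == 0) {
			pthread_mutex_unlock(&execute_lock);
			return;
		}
		item = queue[--qlen];
		pthread_mutex_unlock(&execute_lock);

		printf("dispatching item %d\n", item);	/* the "send task" step */
	}
}

int main(void)
{
	execute_tasks(1);
	execute_tasks(2);
	execute_tasks(-1);	/* drain only, nothing new to add */
	return 0;
}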
@@ -2351,7 +2380,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2351 2380
2352 list_for_each_entry_safe(task, task_tmp, 2381 list_for_each_entry_safe(task, task_tmp,
2353 &cmd->t_task_list, t_list) { 2382 &cmd->t_task_list, t_list) {
2354 if (!task->task_sense) 2383 if (!(task->task_flags & TF_HAS_SENSE))
2355 continue; 2384 continue;
2356 2385
2357 if (!dev->transport->get_sense_buffer) { 2386 if (!dev->transport->get_sense_buffer) {
@@ -3346,6 +3375,32 @@ static inline void transport_free_pages(struct se_cmd *cmd)
3346} 3375}
3347 3376
3348/** 3377/**
3378 * transport_release_cmd - free a command
3379 * @cmd: command to free
3380 *
3381 * This routine unconditionally frees a command, and reference counting
3382 * or list removal must be done in the caller.
3383 */
3384static void transport_release_cmd(struct se_cmd *cmd)
3385{
3386 BUG_ON(!cmd->se_tfo);
3387
3388 if (cmd->se_tmr_req)
3389 core_tmr_release_req(cmd->se_tmr_req);
3390 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3391 kfree(cmd->t_task_cdb);
3392 /*
3393 * If this cmd has been setup with target_get_sess_cmd(), drop
3394 * the kref and call ->release_cmd() in kref callback.
3395 */
3396 if (cmd->check_release != 0) {
3397 target_put_sess_cmd(cmd->se_sess, cmd);
3398 return;
3399 }
3400 cmd->se_tfo->release_cmd(cmd);
3401}
3402
3403/**
3349 * transport_put_cmd - release a reference to a command 3404 * transport_put_cmd - release a reference to a command
3350 * @cmd: command to release 3405 * @cmd: command to release
3351 * 3406 *
@@ -3870,33 +3925,6 @@ queue_full:
3870 return 0; 3925 return 0;
3871} 3926}
3872 3927
3873/**
3874 * transport_release_cmd - free a command
3875 * @cmd: command to free
3876 *
3877 * This routine unconditionally frees a command, and reference counting
3878 * or list removal must be done in the caller.
3879 */
3880void transport_release_cmd(struct se_cmd *cmd)
3881{
3882 BUG_ON(!cmd->se_tfo);
3883
3884 if (cmd->se_tmr_req)
3885 core_tmr_release_req(cmd->se_tmr_req);
3886 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3887 kfree(cmd->t_task_cdb);
3888 /*
3889 * Check if target_wait_for_sess_cmds() is expecting to
3890 * release se_cmd directly here..
3891 */
3892 if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3893 if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3894 return;
3895
3896 cmd->se_tfo->release_cmd(cmd);
3897}
3898EXPORT_SYMBOL(transport_release_cmd);
3899
3900void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 3928void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3901{ 3929{
3902 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 3930 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3923,11 +3951,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
3923/* target_get_sess_cmd - Add command to active ->sess_cmd_list 3951/* target_get_sess_cmd - Add command to active ->sess_cmd_list
3924 * @se_sess: session to reference 3952 * @se_sess: session to reference
3925 * @se_cmd: command descriptor to add 3953 * @se_cmd: command descriptor to add
3954 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
3926 */ 3955 */
3927void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 3956void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3957 bool ack_kref)
3928{ 3958{
3929 unsigned long flags; 3959 unsigned long flags;
3930 3960
3961 kref_init(&se_cmd->cmd_kref);
3962 /*
3963 * Add a second kref if the fabric caller is expecting to handle
3964 * fabric acknowledgement that requires two target_put_sess_cmd()
3965 * invocations before se_cmd descriptor release.
3966 */
3967 if (ack_kref == true)
3968 kref_get(&se_cmd->cmd_kref);
3969
3931 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 3970 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3932 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 3971 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3933 se_cmd->check_release = 1; 3972 se_cmd->check_release = 1;
@@ -3935,30 +3974,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3935} 3974}
3936EXPORT_SYMBOL(target_get_sess_cmd); 3975EXPORT_SYMBOL(target_get_sess_cmd);
3937 3976
3938/* target_put_sess_cmd - Check for active I/O shutdown or list delete 3977static void target_release_cmd_kref(struct kref *kref)
3939 * @se_sess: session to reference
3940 * @se_cmd: command descriptor to drop
3941 */
3942int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3943{ 3978{
3979 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
3980 struct se_session *se_sess = se_cmd->se_sess;
3944 unsigned long flags; 3981 unsigned long flags;
3945 3982
3946 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 3983 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3947 if (list_empty(&se_cmd->se_cmd_list)) { 3984 if (list_empty(&se_cmd->se_cmd_list)) {
3948 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3985 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3949 WARN_ON(1); 3986 WARN_ON(1);
3950 return 0; 3987 return;
3951 } 3988 }
3952
3953 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 3989 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3954 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3990 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3955 complete(&se_cmd->cmd_wait_comp); 3991 complete(&se_cmd->cmd_wait_comp);
3956 return 1; 3992 return;
3957 } 3993 }
3958 list_del(&se_cmd->se_cmd_list); 3994 list_del(&se_cmd->se_cmd_list);
3959 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3995 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3960 3996
3961 return 0; 3997 se_cmd->se_tfo->release_cmd(se_cmd);
3998}
3999
4000/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
4001 * @se_sess: session to reference
4002 * @se_cmd: command descriptor to drop
4003 */
4004int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
4005{
4006 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
3962} 4007}
3963EXPORT_SYMBOL(target_put_sess_cmd); 4008EXPORT_SYMBOL(target_put_sess_cmd);
3964 4009
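
target_get_sess_cmd()/target_put_sess_cmd() now ride on a kref embedded in the se_cmd: kref_init() sets the count to one, an optional extra kref_get() covers the fabric acknowledgement when ack_kref is true, and the final kref_put() runs target_release_cmd_kref(), which either completes cmd_wait_comp for a tearing-down session or removes the command from the session list and calls ->release_cmd(). The counting itself can be modeled with plain C11 atomics; this is a sketch of the two-reference idea only, not of the kernel's kref implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	atomic_int refcount;
};

static void release_cmd(struct cmd *c)
{
	printf("last reference dropped, releasing command %p\n", (void *)c);
}

static void get_cmd(struct cmd *c, bool ack_ref)
{
	atomic_store(&c->refcount, 1);			/* kref_init() analogue */
	if (ack_ref)
		atomic_fetch_add(&c->refcount, 1);	/* extra ref for the ack */
}

static void put_cmd(struct cmd *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) == 1)	/* kref_put() analogue */
		release_cmd(c);
}

int main(void)
{
	struct cmd c;

	get_cmd(&c, true);	/* submission with ack_kref == true: count is 2 */
	put_cmd(&c);		/* completion path: command still alive */
	put_cmd(&c);		/* fabric acknowledgement: releases it */
	return 0;
}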
@@ -4174,7 +4219,7 @@ check_cond:
4174 4219
4175static int transport_clear_lun_thread(void *p) 4220static int transport_clear_lun_thread(void *p)
4176{ 4221{
4177 struct se_lun *lun = (struct se_lun *)p; 4222 struct se_lun *lun = p;
4178 4223
4179 __transport_clear_lun_from_sessions(lun); 4224 __transport_clear_lun_from_sessions(lun);
4180 complete(&lun->lun_shutdown_comp); 4225 complete(&lun->lun_shutdown_comp);
@@ -4353,6 +4398,7 @@ int transport_send_check_condition_and_sense(
4353 case TCM_NON_EXISTENT_LUN: 4398 case TCM_NON_EXISTENT_LUN:
4354 /* CURRENT ERROR */ 4399 /* CURRENT ERROR */
4355 buffer[offset] = 0x70; 4400 buffer[offset] = 0x70;
4401 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4356 /* ILLEGAL REQUEST */ 4402 /* ILLEGAL REQUEST */
4357 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4403 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4358 /* LOGICAL UNIT NOT SUPPORTED */ 4404 /* LOGICAL UNIT NOT SUPPORTED */
@@ -4362,6 +4408,7 @@ int transport_send_check_condition_and_sense(
4362 case TCM_SECTOR_COUNT_TOO_MANY: 4408 case TCM_SECTOR_COUNT_TOO_MANY:
4363 /* CURRENT ERROR */ 4409 /* CURRENT ERROR */
4364 buffer[offset] = 0x70; 4410 buffer[offset] = 0x70;
4411 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4365 /* ILLEGAL REQUEST */ 4412 /* ILLEGAL REQUEST */
4366 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4413 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4367 /* INVALID COMMAND OPERATION CODE */ 4414 /* INVALID COMMAND OPERATION CODE */
@@ -4370,6 +4417,7 @@ int transport_send_check_condition_and_sense(
4370 case TCM_UNKNOWN_MODE_PAGE: 4417 case TCM_UNKNOWN_MODE_PAGE:
4371 /* CURRENT ERROR */ 4418 /* CURRENT ERROR */
4372 buffer[offset] = 0x70; 4419 buffer[offset] = 0x70;
4420 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4373 /* ILLEGAL REQUEST */ 4421 /* ILLEGAL REQUEST */
4374 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4422 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4375 /* INVALID FIELD IN CDB */ 4423 /* INVALID FIELD IN CDB */
@@ -4378,6 +4426,7 @@ int transport_send_check_condition_and_sense(
4378 case TCM_CHECK_CONDITION_ABORT_CMD: 4426 case TCM_CHECK_CONDITION_ABORT_CMD:
4379 /* CURRENT ERROR */ 4427 /* CURRENT ERROR */
4380 buffer[offset] = 0x70; 4428 buffer[offset] = 0x70;
4429 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4381 /* ABORTED COMMAND */ 4430 /* ABORTED COMMAND */
4382 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4431 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4383 /* BUS DEVICE RESET FUNCTION OCCURRED */ 4432 /* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4387,6 +4436,7 @@ int transport_send_check_condition_and_sense(
4387 case TCM_INCORRECT_AMOUNT_OF_DATA: 4436 case TCM_INCORRECT_AMOUNT_OF_DATA:
4388 /* CURRENT ERROR */ 4437 /* CURRENT ERROR */
4389 buffer[offset] = 0x70; 4438 buffer[offset] = 0x70;
4439 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4390 /* ABORTED COMMAND */ 4440 /* ABORTED COMMAND */
4391 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4441 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4392 /* WRITE ERROR */ 4442 /* WRITE ERROR */
@@ -4397,6 +4447,7 @@ int transport_send_check_condition_and_sense(
4397 case TCM_INVALID_CDB_FIELD: 4447 case TCM_INVALID_CDB_FIELD:
4398 /* CURRENT ERROR */ 4448 /* CURRENT ERROR */
4399 buffer[offset] = 0x70; 4449 buffer[offset] = 0x70;
4450 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4400 /* ABORTED COMMAND */ 4451 /* ABORTED COMMAND */
4401 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4452 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4402 /* INVALID FIELD IN CDB */ 4453 /* INVALID FIELD IN CDB */
@@ -4405,6 +4456,7 @@ int transport_send_check_condition_and_sense(
4405 case TCM_INVALID_PARAMETER_LIST: 4456 case TCM_INVALID_PARAMETER_LIST:
4406 /* CURRENT ERROR */ 4457 /* CURRENT ERROR */
4407 buffer[offset] = 0x70; 4458 buffer[offset] = 0x70;
4459 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4408 /* ABORTED COMMAND */ 4460 /* ABORTED COMMAND */
4409 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4461 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4410 /* INVALID FIELD IN PARAMETER LIST */ 4462 /* INVALID FIELD IN PARAMETER LIST */
@@ -4413,6 +4465,7 @@ int transport_send_check_condition_and_sense(
4413 case TCM_UNEXPECTED_UNSOLICITED_DATA: 4465 case TCM_UNEXPECTED_UNSOLICITED_DATA:
4414 /* CURRENT ERROR */ 4466 /* CURRENT ERROR */
4415 buffer[offset] = 0x70; 4467 buffer[offset] = 0x70;
4468 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4416 /* ABORTED COMMAND */ 4469 /* ABORTED COMMAND */
4417 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4470 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4418 /* WRITE ERROR */ 4471 /* WRITE ERROR */
@@ -4423,6 +4476,7 @@ int transport_send_check_condition_and_sense(
4423 case TCM_SERVICE_CRC_ERROR: 4476 case TCM_SERVICE_CRC_ERROR:
4424 /* CURRENT ERROR */ 4477 /* CURRENT ERROR */
4425 buffer[offset] = 0x70; 4478 buffer[offset] = 0x70;
4479 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4426 /* ABORTED COMMAND */ 4480 /* ABORTED COMMAND */
4427 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4481 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4428 /* PROTOCOL SERVICE CRC ERROR */ 4482 /* PROTOCOL SERVICE CRC ERROR */
@@ -4433,6 +4487,7 @@ int transport_send_check_condition_and_sense(
4433 case TCM_SNACK_REJECTED: 4487 case TCM_SNACK_REJECTED:
4434 /* CURRENT ERROR */ 4488 /* CURRENT ERROR */
4435 buffer[offset] = 0x70; 4489 buffer[offset] = 0x70;
4490 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4436 /* ABORTED COMMAND */ 4491 /* ABORTED COMMAND */
4437 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4492 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4438 /* READ ERROR */ 4493 /* READ ERROR */
@@ -4443,6 +4498,7 @@ int transport_send_check_condition_and_sense(
4443 case TCM_WRITE_PROTECTED: 4498 case TCM_WRITE_PROTECTED:
4444 /* CURRENT ERROR */ 4499 /* CURRENT ERROR */
4445 buffer[offset] = 0x70; 4500 buffer[offset] = 0x70;
4501 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4446 /* DATA PROTECT */ 4502 /* DATA PROTECT */
4447 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 4503 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4448 /* WRITE PROTECTED */ 4504 /* WRITE PROTECTED */
@@ -4451,6 +4507,7 @@ int transport_send_check_condition_and_sense(
4451 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 4507 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4452 /* CURRENT ERROR */ 4508 /* CURRENT ERROR */
4453 buffer[offset] = 0x70; 4509 buffer[offset] = 0x70;
4510 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4454 /* UNIT ATTENTION */ 4511 /* UNIT ATTENTION */
4455 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 4512 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4456 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 4513 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4460,6 +4517,7 @@ int transport_send_check_condition_and_sense(
4460 case TCM_CHECK_CONDITION_NOT_READY: 4517 case TCM_CHECK_CONDITION_NOT_READY:
4461 /* CURRENT ERROR */ 4518 /* CURRENT ERROR */
4462 buffer[offset] = 0x70; 4519 buffer[offset] = 0x70;
4520 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4463 /* Not Ready */ 4521 /* Not Ready */
4464 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; 4522 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4465 transport_get_sense_codes(cmd, &asc, &ascq); 4523 transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4470,6 +4528,7 @@ int transport_send_check_condition_and_sense(
4470 default: 4528 default:
4471 /* CURRENT ERROR */ 4529 /* CURRENT ERROR */
4472 buffer[offset] = 0x70; 4530 buffer[offset] = 0x70;
4531 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
4473 /* ILLEGAL REQUEST */ 4532 /* ILLEGAL REQUEST */
4474 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4533 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4475 /* LOGICAL UNIT COMMUNICATION FAILURE */ 4534 /* LOGICAL UNIT COMMUNICATION FAILURE */
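
Every CHECK CONDITION case above now also sets buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10, i.e. the "additional sense length" byte of fixed-format sense data, so initiators that honour that field see the full 18 bytes including the ASC/ASCQ pair. A small stand-alone illustration of the fixed-format layout; the offsets follow SPC fixed-format sense (response code at byte 0, sense key at byte 2, additional length at byte 7, ASC/ASCQ at bytes 12/13), and the macro values below are assumed to match the kernel's definitions:

#include <stdio.h>
#include <string.h>

#define SENSE_LEN			18
#define SPC_SENSE_KEY_OFFSET		2
#define SPC_ADD_SENSE_LEN_OFFSET	7
#define SPC_ASC_KEY_OFFSET		12
#define SPC_ASCQ_KEY_OFFSET		13

int main(void)
{
	unsigned char buf[SENSE_LEN];

	memset(buf, 0, sizeof(buf));
	buf[0] = 0x70;				/* current error, fixed format */
	buf[SPC_SENSE_KEY_OFFSET] = 0x05;	/* ILLEGAL REQUEST */
	buf[SPC_ADD_SENSE_LEN_OFFSET] = 10;	/* bytes 8..17 follow */
	buf[SPC_ASC_KEY_OFFSET] = 0x24;		/* INVALID FIELD IN CDB */
	buf[SPC_ASCQ_KEY_OFFSET] = 0x00;

	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x%s", buf[i], (i + 1) % 8 ? " " : "\n");
	printf("\n");
	return 0;
}

An additional length of 10 means ten bytes follow byte 7, giving the 18-byte buffer that the TRANSPORT_SENSE_BUFFER-sized payloads in this file already allocate.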
@@ -4545,11 +4604,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
4545 cmd->se_tfo->queue_status(cmd); 4604 cmd->se_tfo->queue_status(cmd);
4546} 4605}
4547 4606
4548/* transport_generic_do_tmr(): 4607static int transport_generic_do_tmr(struct se_cmd *cmd)
4549 *
4550 *
4551 */
4552int transport_generic_do_tmr(struct se_cmd *cmd)
4553{ 4608{
4554 struct se_device *dev = cmd->se_dev; 4609 struct se_device *dev = cmd->se_dev;
4555 struct se_tmr_req *tmr = cmd->se_tmr_req; 4610 struct se_tmr_req *tmr = cmd->se_tmr_req;
@@ -4597,7 +4652,7 @@ static int transport_processing_thread(void *param)
4597{ 4652{
4598 int ret; 4653 int ret;
4599 struct se_cmd *cmd; 4654 struct se_cmd *cmd;
4600 struct se_device *dev = (struct se_device *) param; 4655 struct se_device *dev = param;
4601 4656
4602 while (!kthread_should_stop()) { 4657 while (!kthread_should_stop()) {
4603 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, 4658 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@@ -4607,8 +4662,6 @@ static int transport_processing_thread(void *param)
4607 goto out; 4662 goto out;
4608 4663
4609get_cmd: 4664get_cmd:
4610 __transport_execute_tasks(dev);
4611
4612 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); 4665 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4613 if (!cmd) 4666 if (!cmd)
4614 continue; 4667 continue;
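Note on the target_core_transport.c hunks above: every CHECK CONDITION case now also sets byte SPC_ADD_SENSE_LEN_OFFSET to 10. In SPC fixed-format sense data (response code 0x70), byte 7 carries the ADDITIONAL SENSE LENGTH; a value of 10 marks bytes 8 through 17 as valid, which is just enough to cover the ASC/ASCQ fields at bytes 12 and 13. A minimal sketch of that layout, assuming the conventional offsets (the helper below is illustrative only, not part of this patch):

	/* Illustrative only -- assumes the standard SPC fixed-format offsets. */
	static void fill_fixed_sense(u8 *buf, u8 key, u8 asc, u8 ascq)
	{
		buf[0]  = 0x70;	/* current error, fixed format               */
		buf[2]  = key;	/* sense key                                  */
		buf[7]  = 10;	/* additional length: bytes 8..17 are valid   */
		buf[12] = asc;	/* additional sense code                      */
		buf[13] = ascq;	/* additional sense code qualifier            */
	}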
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 50a480db7a66..3e12f6bcfa10 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -30,13 +30,11 @@
30#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
31 31
32#include <target/target_core_base.h> 32#include <target/target_core_base.h>
33#include <target/target_core_device.h> 33#include <target/target_core_fabric.h>
34#include <target/target_core_transport.h>
35#include <target/target_core_fabric_ops.h>
36#include <target/target_core_configfs.h> 34#include <target/target_core_configfs.h>
37 35
36#include "target_core_internal.h"
38#include "target_core_alua.h" 37#include "target_core_alua.h"
39#include "target_core_hba.h"
40#include "target_core_pr.h" 38#include "target_core_pr.h"
41#include "target_core_ua.h" 39#include "target_core_ua.h"
42 40
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 71fc9cea5dc9..addc18f727ea 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -39,12 +39,8 @@
39#include <scsi/fc_encode.h> 39#include <scsi/fc_encode.h>
40 40
41#include <target/target_core_base.h> 41#include <target/target_core_base.h>
42#include <target/target_core_transport.h> 42#include <target/target_core_fabric.h>
43#include <target/target_core_fabric_ops.h>
44#include <target/target_core_device.h>
45#include <target/target_core_tpg.h>
46#include <target/target_core_configfs.h> 43#include <target/target_core_configfs.h>
47#include <target/target_core_tmr.h>
48#include <target/configfs_macros.h> 44#include <target/configfs_macros.h>
49 45
50#include "tcm_fc.h" 46#include "tcm_fc.h"
@@ -367,6 +363,11 @@ static void ft_send_tm(struct ft_cmd *cmd)
367 struct ft_sess *sess; 363 struct ft_sess *sess;
368 u8 tm_func; 364 u8 tm_func;
369 365
366 transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
367 cmd->sess->se_sess, 0, DMA_NONE, 0,
368 &cmd->ft_sense_buffer[0]);
369 target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
370
370 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 371 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
371 372
372 switch (fcp->fc_tm_flags) { 373 switch (fcp->fc_tm_flags) {
@@ -420,7 +421,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
420 sess = cmd->sess; 421 sess = cmd->sess;
421 transport_send_check_condition_and_sense(&cmd->se_cmd, 422 transport_send_check_condition_and_sense(&cmd->se_cmd,
422 cmd->se_cmd.scsi_sense_reason, 0); 423 cmd->se_cmd.scsi_sense_reason, 0);
423 transport_generic_free_cmd(&cmd->se_cmd, 0);
424 ft_sess_put(sess); 424 ft_sess_put(sess);
425 return; 425 return;
426 } 426 }
@@ -536,7 +536,6 @@ static void ft_send_work(struct work_struct *work)
536{ 536{
537 struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); 537 struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
538 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); 538 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
539 struct se_cmd *se_cmd;
540 struct fcp_cmnd *fcp; 539 struct fcp_cmnd *fcp;
541 int data_dir = 0; 540 int data_dir = 0;
542 u32 data_len; 541 u32 data_len;
@@ -591,15 +590,6 @@ static void ft_send_work(struct work_struct *work)
591 data_len = ntohl(fcp->fc_dl); 590 data_len = ntohl(fcp->fc_dl);
592 cmd->cdb = fcp->fc_cdb; 591 cmd->cdb = fcp->fc_cdb;
593 } 592 }
594
595 se_cmd = &cmd->se_cmd;
596 /*
597 * Initialize struct se_cmd descriptor from target_core_mod
598 * infrastructure
599 */
600 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
601 data_len, data_dir, task_attr,
602 &cmd->ft_sense_buffer[0]);
603 /* 593 /*
604 * Check for FCP task management flags 594 * Check for FCP task management flags
605 */ 595 */
@@ -607,39 +597,20 @@ static void ft_send_work(struct work_struct *work)
607 ft_send_tm(cmd); 597 ft_send_tm(cmd);
608 return; 598 return;
609 } 599 }
610
611 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); 600 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
612
613 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); 601 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
614 ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun); 602 /*
603 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
604 * directly from ft_check_stop_free callback in response path.
605 */
606 ret = target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
607 &cmd->ft_sense_buffer[0], cmd->lun, data_len,
608 task_attr, data_dir, 0);
609 pr_debug("r_ctl %x alloc target_submit_cmd %d\n", fh->fh_r_ctl, ret);
615 if (ret < 0) { 610 if (ret < 0) {
616 ft_dump_cmd(cmd, __func__); 611 ft_dump_cmd(cmd, __func__);
617 transport_send_check_condition_and_sense(&cmd->se_cmd,
618 cmd->se_cmd.scsi_sense_reason, 0);
619 return;
620 }
621
622 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
623
624 pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
625 ft_dump_cmd(cmd, __func__);
626
627 if (ret == -ENOMEM) {
628 transport_send_check_condition_and_sense(se_cmd,
629 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
630 transport_generic_free_cmd(se_cmd, 0);
631 return;
632 }
633 if (ret == -EINVAL) {
634 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
635 ft_queue_status(se_cmd);
636 else
637 transport_send_check_condition_and_sense(se_cmd,
638 se_cmd->scsi_sense_reason, 0);
639 transport_generic_free_cmd(se_cmd, 0);
640 return; 612 return;
641 } 613 }
642 transport_handle_cdb_direct(se_cmd);
643 return; 614 return;
644 615
645err: 616err:
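The tfc_cmd.c changes above replace the open-coded init / LUN-lookup / allocate-tasks / execute sequence with the consolidated target_submit_cmd() helper, with the fabric holding a single se_cmd->cmd_kref that is released from the ft_check_stop_free callback on the response path (see the comment added in the hunk). A parameter-by-parameter sketch of the call as used here; the values are the ones already visible above, the comments are explanatory only:

	ret = target_submit_cmd(&cmd->se_cmd,		  /* descriptor to run          */
				cmd->sess->se_sess,	  /* initiator session          */
				cmd->cdb,		  /* CDB from the FCP_CMND      */
				&cmd->ft_sense_buffer[0], /* where sense data lands     */
				cmd->lun,		  /* unpacked LUN               */
				data_len,		  /* expected transfer length   */
				task_attr,		  /* SIMPLE/ORDERED/...         */
				data_dir,		  /* DMA_TO/FROM_DEVICE/NONE    */
				0);			  /* flags                      */
	if (ret < 0)				/* core refused the descriptor */
		ft_dump_cmd(cmd, __func__);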
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 9402b7387cac..73852fbc857b 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -41,12 +41,8 @@
41#include <scsi/libfc.h> 41#include <scsi/libfc.h>
42 42
43#include <target/target_core_base.h> 43#include <target/target_core_base.h>
44#include <target/target_core_transport.h> 44#include <target/target_core_fabric.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_fabric_configfs.h> 45#include <target/target_core_fabric_configfs.h>
47#include <target/target_core_fabric_lib.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tpg.h>
50#include <target/target_core_configfs.h> 46#include <target/target_core_configfs.h>
51#include <target/configfs_macros.h> 47#include <target/configfs_macros.h>
52 48
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1369b1cb103d..d8cabc21036d 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -48,10 +48,7 @@
48#include <scsi/fc_encode.h> 48#include <scsi/fc_encode.h>
49 49
50#include <target/target_core_base.h> 50#include <target/target_core_base.h>
51#include <target/target_core_transport.h> 51#include <target/target_core_fabric.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_device.h>
54#include <target/target_core_tpg.h>
55#include <target/target_core_configfs.h> 52#include <target/target_core_configfs.h>
56#include <target/configfs_macros.h> 53#include <target/configfs_macros.h>
57 54
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 326921385aff..4c0507cf808c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -40,10 +40,7 @@
40#include <scsi/libfc.h> 40#include <scsi/libfc.h>
41 41
42#include <target/target_core_base.h> 42#include <target/target_core_base.h>
43#include <target/target_core_transport.h> 43#include <target/target_core_fabric.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_device.h>
46#include <target/target_core_tpg.h>
47#include <target/target_core_configfs.h> 44#include <target/target_core_configfs.h>
48#include <target/configfs_macros.h> 45#include <target/configfs_macros.h>
49 46
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 66bc74d9ce2a..378276c9d3cf 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -146,7 +146,7 @@ static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
146 146
147 ret = adp8860_read(client, reg, &reg_val); 147 ret = adp8860_read(client, reg, &reg_val);
148 148
149 if (!ret && ((reg_val & bit_mask) == 0)) { 149 if (!ret && ((reg_val & bit_mask) != bit_mask)) {
150 reg_val |= bit_mask; 150 reg_val |= bit_mask;
151 ret = adp8860_write(client, reg, reg_val); 151 ret = adp8860_write(client, reg, reg_val);
152 } 152 }
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 6c68a6899e87..6735059376d6 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -160,7 +160,7 @@ static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
160 160
161 ret = adp8870_read(client, reg, &reg_val); 161 ret = adp8870_read(client, reg, &reg_val);
162 162
163 if (!ret && ((reg_val & bit_mask) == 0)) { 163 if (!ret && ((reg_val & bit_mask) != bit_mask)) {
164 reg_val |= bit_mask; 164 reg_val |= bit_mask;
165 ret = adp8870_write(client, reg, reg_val); 165 ret = adp8870_write(client, reg, reg_val);
166 } 166 }
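The adp8860/adp8870 change above matters whenever bit_mask covers more than one bit: the old test only skipped the write when none of the masked bits were set, so a partially programmed mask was treated as already set. Worked example with illustrative values:

	uint8_t reg_val = 0x01, bit_mask = 0x03;	/* only bit 0 currently set        */

	if ((reg_val & bit_mask) == 0)			/* old test: false, write skipped  */
		reg_val |= bit_mask;
	if ((reg_val & bit_mask) != bit_mask)		/* new test: true, missing bit set */
		reg_val |= bit_mask;			/* reg_val becomes 0x03            */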
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 4f5d1c4cb6ab..27d1d7a29c77 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -190,6 +190,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
190 190
191 priv->io_reg = regulator_get(&spi->dev, "vdd"); 191 priv->io_reg = regulator_get(&spi->dev, "vdd");
192 if (IS_ERR(priv->io_reg)) { 192 if (IS_ERR(priv->io_reg)) {
193 ret = PTR_ERR(priv->io_reg);
193 dev_err(&spi->dev, "%s: Unable to get the IO regulator\n", 194 dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
194 __func__); 195 __func__);
195 goto err3; 196 goto err3;
@@ -197,6 +198,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
197 198
198 priv->core_reg = regulator_get(&spi->dev, "vcore"); 199 priv->core_reg = regulator_get(&spi->dev, "vcore");
199 if (IS_ERR(priv->core_reg)) { 200 if (IS_ERR(priv->core_reg)) {
201 ret = PTR_ERR(priv->core_reg);
200 dev_err(&spi->dev, "%s: Unable to get the core regulator\n", 202 dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
201 __func__); 203 __func__);
202 goto err4; 204 goto err4;
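The l4f00242t03 hunks fix the usual ERR_PTR pitfall: regulator_get() reports failure through an error-encoded pointer, and unless PTR_ERR() is copied into ret before the goto, probe reaches the error label still holding whatever ret contained beforehand. Minimal sketch of the corrected pattern (names illustrative):

	reg = regulator_get(dev, "vdd");
	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);	/* negative errno encoded in the pointer   */
		goto err_free;		/* previously: ret was left stale here     */
	}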
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index ba6eda4b5143..0edb91c0de6b 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,5 +1,6 @@
1#include <linux/bio.h> 1#include <linux/bio.h>
2#include <linux/io.h> 2#include <linux/io.h>
3#include <linux/export.h>
3#include <xen/page.h> 4#include <xen/page.h>
4 5
5bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, 6bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -11,3 +12,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
11 return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && 12 return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
12 ((mfn1 == mfn2) || ((mfn1+1) == mfn2)); 13 ((mfn1 == mfn2) || ((mfn1+1) == mfn2));
13} 14}
15EXPORT_SYMBOL(xen_biovec_phys_mergeable);
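The biomerge.c hunks export xen_biovec_phys_mergeable so that modular callers can link against it; the EXPORT_SYMBOL definitions live in linux/export.h, so that header is included explicitly alongside the new export. Generic shape of the pattern (symbol name illustrative):

	#include <linux/export.h>

	int my_helper(void)		/* illustrative symbol name */
	{
		return 1;
	}
	EXPORT_SYMBOL(my_helper);	/* now resolvable from loadable modules */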
diff --git a/firmware/Makefile b/firmware/Makefile
index 5f43bfba3c7a..0d15a3d113a2 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -82,7 +82,6 @@ fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
82fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin 82fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
83fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ 83fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
84 advansys/3550.bin advansys/38C0800.bin 84 advansys/3550.bin advansys/38C0800.bin
85fw-shipped-$(CONFIG_SCSI_ISCI) += isci/isci_firmware.bin
86fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \ 85fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
87 qlogic/12160.bin 86 qlogic/12160.bin
88fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin 87fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
diff --git a/firmware/isci/isci_firmware.bin.ihex b/firmware/isci/isci_firmware.bin.ihex
deleted file mode 100644
index 2e6619570072..000000000000
--- a/firmware/isci/isci_firmware.bin.ihex
+++ /dev/null
@@ -1,16 +0,0 @@
1:10000000495343554F454D42E80018100002000087
2:1000100000000000000000000101000000000000DE
3:10002000FFFFCF5F0100000008DD0B0000FC0F00A8
4:10003000097C0B006EFC0A00FFFFCF5F010000008F
5:1000400008DD0B0000FC0F00097C0B006EFC0A00B1
6:10005000FFFFCF5F0100000008DD0B0000FC0F0078
7:10006000097C0B006EFC0A00FFFFCF5F010000005F
8:1000700008DD0B0000FC0F00097C0B006EFC0A0081
9:100080000101000000000000FFFFCF5F0200000040
10:1000900008DD0B0000FC0F00097C0B006EFC0A0061
11:1000A000FFFFCF5F0200000008DD0B0000FC0F0027
12:1000B000097C0B006EFC0A00FFFFCF5F020000000E
13:1000C00008DD0B0000FC0F00097C0B006EFC0A0031
14:1000D000FFFFCF5F0200000008DD0B0000FC0F00F7
15:0800E000097C0B006EFC0A0014
16:00000001FF
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index f66cc1625150..0554b00a7b33 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -140,7 +140,6 @@ config CIFS_DFS_UPCALL
140 140
141config CIFS_FSCACHE 141config CIFS_FSCACHE
142 bool "Provide CIFS client caching support (EXPERIMENTAL)" 142 bool "Provide CIFS client caching support (EXPERIMENTAL)"
143 depends on EXPERIMENTAL
144 depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y 143 depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
145 help 144 help
146 Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data 145 Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
@@ -149,7 +148,7 @@ config CIFS_FSCACHE
149 148
150config CIFS_ACL 149config CIFS_ACL
151 bool "Provide CIFS ACL support (EXPERIMENTAL)" 150 bool "Provide CIFS ACL support (EXPERIMENTAL)"
152 depends on EXPERIMENTAL && CIFS_XATTR && KEYS 151 depends on CIFS_XATTR && KEYS
153 help 152 help
154 Allows to fetch CIFS/NTFS ACL from the server. The DACL blob 153 Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
155 is handed over to the application/caller. 154 is handed over to the application/caller.
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 84e8c0724704..24b3dfc05282 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -676,14 +676,23 @@ static ssize_t cifs_multiuser_mount_proc_write(struct file *file,
676{ 676{
677 char c; 677 char c;
678 int rc; 678 int rc;
679 static bool warned;
679 680
680 rc = get_user(c, buffer); 681 rc = get_user(c, buffer);
681 if (rc) 682 if (rc)
682 return rc; 683 return rc;
683 if (c == '0' || c == 'n' || c == 'N') 684 if (c == '0' || c == 'n' || c == 'N')
684 multiuser_mount = 0; 685 multiuser_mount = 0;
685 else if (c == '1' || c == 'y' || c == 'Y') 686 else if (c == '1' || c == 'y' || c == 'Y') {
686 multiuser_mount = 1; 687 multiuser_mount = 1;
688 if (!warned) {
689 warned = true;
690 printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
691 "mount code is scheduled to be deprecated in "
692 "3.5. Please switch to using the multiuser "
693 "mount option.");
694 }
695 }
687 696
688 return count; 697 return count;
689} 698}
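The cifs_debug.c hunk adds a warn-once guard so the deprecation notice is emitted only on the first write that enables the legacy multiuser behaviour. The kernel's printk_once() helper gives the same once-only semantics in a single statement; both forms are sketched below (message text illustrative):

	static bool warned;
	if (!warned) {					/* open-coded, as in the hunk */
		warned = true;
		printk(KERN_WARNING "feature X is deprecated\n");
	}

	printk_once(KERN_WARNING "feature X is deprecated\n");	/* shorthand form */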
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 2272fd5fe5b7..e622863b292f 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -113,9 +113,11 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
113 MAX_MECH_STR_LEN + 113 MAX_MECH_STR_LEN +
114 UID_KEY_LEN + (sizeof(uid_t) * 2) + 114 UID_KEY_LEN + (sizeof(uid_t) * 2) +
115 CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + 115 CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
116 USER_KEY_LEN + strlen(sesInfo->user_name) +
117 PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; 116 PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
118 117
118 if (sesInfo->user_name)
119 desc_len += USER_KEY_LEN + strlen(sesInfo->user_name);
120
119 spnego_key = ERR_PTR(-ENOMEM); 121 spnego_key = ERR_PTR(-ENOMEM);
120 description = kzalloc(desc_len, GFP_KERNEL); 122 description = kzalloc(desc_len, GFP_KERNEL);
121 if (description == NULL) 123 if (description == NULL)
@@ -152,8 +154,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
152 dp = description + strlen(description); 154 dp = description + strlen(description);
153 sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); 155 sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
154 156
155 dp = description + strlen(description); 157 if (sesInfo->user_name) {
156 sprintf(dp, ";user=%s", sesInfo->user_name); 158 dp = description + strlen(description);
159 sprintf(dp, ";user=%s", sesInfo->user_name);
160 }
157 161
158 dp = description + strlen(description); 162 dp = description + strlen(description);
159 sprintf(dp, ";pid=0x%x", current->pid); 163 sprintf(dp, ";pid=0x%x", current->pid);
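The cifs_spnego.c change handles sessions established without a username (ses->user_name == NULL): both the length estimate for the key description and the sprintf() that appends the user= field are now guarded, keeping the allocation size and the writers in sync. Reduced sketch of the pattern (identifiers illustrative, not the real cifs names):

	len = base_len + 1;
	if (user_name)					/* optional field counted ...           */
		len += strlen(";user=") + strlen(user_name);
	desc = kzalloc(len, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	/* ... mandatory fields written here ... */
	if (user_name)					/* ... and written under the same test  */
		sprintf(desc + strlen(desc), ";user=%s", user_name);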
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 1b2e180b018d..fbb9da951843 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -27,17 +27,17 @@
27#include "cifs_debug.h" 27#include "cifs_debug.h"
28 28
29/* 29/*
30 * cifs_ucs2_bytes - how long will a string be after conversion? 30 * cifs_utf16_bytes - how long will a string be after conversion?
31 * @ucs - pointer to input string 31 * @utf16 - pointer to input string
32 * @maxbytes - don't go past this many bytes of input string 32 * @maxbytes - don't go past this many bytes of input string
33 * @codepage - destination codepage 33 * @codepage - destination codepage
34 * 34 *
35 * Walk a ucs2le string and return the number of bytes that the string will 35 * Walk a utf16le string and return the number of bytes that the string will
36 * be after being converted to the given charset, not including any null 36 * be after being converted to the given charset, not including any null
37 * termination required. Don't walk past maxbytes in the source buffer. 37 * termination required. Don't walk past maxbytes in the source buffer.
38 */ 38 */
39int 39int
40cifs_ucs2_bytes(const __le16 *from, int maxbytes, 40cifs_utf16_bytes(const __le16 *from, int maxbytes,
41 const struct nls_table *codepage) 41 const struct nls_table *codepage)
42{ 42{
43 int i; 43 int i;
@@ -122,7 +122,7 @@ cp_convert:
122} 122}
123 123
124/* 124/*
125 * cifs_from_ucs2 - convert utf16le string to local charset 125 * cifs_from_utf16 - convert utf16le string to local charset
126 * @to - destination buffer 126 * @to - destination buffer
127 * @from - source buffer 127 * @from - source buffer
128 * @tolen - destination buffer size (in bytes) 128 * @tolen - destination buffer size (in bytes)
@@ -130,7 +130,7 @@ cp_convert:
130 * @codepage - codepage to which characters should be converted 130 * @codepage - codepage to which characters should be converted
131 * @mapchar - should characters be remapped according to the mapchars option? 131 * @mapchar - should characters be remapped according to the mapchars option?
132 * 132 *
133 * Convert a little-endian ucs2le string (as sent by the server) to a string 133 * Convert a little-endian utf16le string (as sent by the server) to a string
134 * in the provided codepage. The tolen and fromlen parameters are to ensure 134 * in the provided codepage. The tolen and fromlen parameters are to ensure
135 * that the code doesn't walk off of the end of the buffer (which is always 135 * that the code doesn't walk off of the end of the buffer (which is always
136 * a danger if the alignment of the source buffer is off). The destination 136 * a danger if the alignment of the source buffer is off). The destination
@@ -139,12 +139,12 @@ cp_convert:
139 * null terminator). 139 * null terminator).
140 * 140 *
141 * Note that some windows versions actually send multiword UTF-16 characters 141 * Note that some windows versions actually send multiword UTF-16 characters
142 * instead of straight UCS-2. The linux nls routines however aren't able to 142 * instead of straight UTF16-2. The linux nls routines however aren't able to
143 * deal with those characters properly. In the event that we get some of 143 * deal with those characters properly. In the event that we get some of
144 * those characters, they won't be translated properly. 144 * those characters, they won't be translated properly.
145 */ 145 */
146int 146int
147cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen, 147cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
148 const struct nls_table *codepage, bool mapchar) 148 const struct nls_table *codepage, bool mapchar)
149{ 149{
150 int i, charlen, safelen; 150 int i, charlen, safelen;
@@ -190,13 +190,13 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
190} 190}
191 191
192/* 192/*
193 * NAME: cifs_strtoUCS() 193 * NAME: cifs_strtoUTF16()
194 * 194 *
195 * FUNCTION: Convert character string to unicode string 195 * FUNCTION: Convert character string to unicode string
196 * 196 *
197 */ 197 */
198int 198int
199cifs_strtoUCS(__le16 *to, const char *from, int len, 199cifs_strtoUTF16(__le16 *to, const char *from, int len,
200 const struct nls_table *codepage) 200 const struct nls_table *codepage)
201{ 201{
202 int charlen; 202 int charlen;
@@ -206,7 +206,7 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
206 for (i = 0; len && *from; i++, from += charlen, len -= charlen) { 206 for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
207 charlen = codepage->char2uni(from, len, &wchar_to); 207 charlen = codepage->char2uni(from, len, &wchar_to);
208 if (charlen < 1) { 208 if (charlen < 1) {
209 cERROR(1, "strtoUCS: char2uni of 0x%x returned %d", 209 cERROR(1, "strtoUTF16: char2uni of 0x%x returned %d",
210 *from, charlen); 210 *from, charlen);
211 /* A question mark */ 211 /* A question mark */
212 wchar_to = 0x003f; 212 wchar_to = 0x003f;
@@ -220,7 +220,8 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
220} 220}
221 221
222/* 222/*
223 * cifs_strndup_from_ucs - copy a string from wire format to the local codepage 223 * cifs_strndup_from_utf16 - copy a string from wire format to the local
224 * codepage
224 * @src - source string 225 * @src - source string
225 * @maxlen - don't walk past this many bytes in the source string 226 * @maxlen - don't walk past this many bytes in the source string
226 * @is_unicode - is this a unicode string? 227 * @is_unicode - is this a unicode string?
@@ -231,19 +232,19 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
231 * error. 232 * error.
232 */ 233 */
233char * 234char *
234cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, 235cifs_strndup_from_utf16(const char *src, const int maxlen,
235 const struct nls_table *codepage) 236 const bool is_unicode, const struct nls_table *codepage)
236{ 237{
237 int len; 238 int len;
238 char *dst; 239 char *dst;
239 240
240 if (is_unicode) { 241 if (is_unicode) {
241 len = cifs_ucs2_bytes((__le16 *) src, maxlen, codepage); 242 len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage);
242 len += nls_nullsize(codepage); 243 len += nls_nullsize(codepage);
243 dst = kmalloc(len, GFP_KERNEL); 244 dst = kmalloc(len, GFP_KERNEL);
244 if (!dst) 245 if (!dst)
245 return NULL; 246 return NULL;
246 cifs_from_ucs2(dst, (__le16 *) src, len, maxlen, codepage, 247 cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
247 false); 248 false);
248 } else { 249 } else {
249 len = strnlen(src, maxlen); 250 len = strnlen(src, maxlen);
@@ -264,7 +265,7 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
264 * names are little endian 16 bit Unicode on the wire 265 * names are little endian 16 bit Unicode on the wire
265 */ 266 */
266int 267int
267cifsConvertToUCS(__le16 *target, const char *source, int srclen, 268cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
268 const struct nls_table *cp, int mapChars) 269 const struct nls_table *cp, int mapChars)
269{ 270{
270 int i, j, charlen; 271 int i, j, charlen;
@@ -273,7 +274,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
273 wchar_t tmp; 274 wchar_t tmp;
274 275
275 if (!mapChars) 276 if (!mapChars)
276 return cifs_strtoUCS(target, source, PATH_MAX, cp); 277 return cifs_strtoUTF16(target, source, PATH_MAX, cp);
277 278
278 for (i = 0, j = 0; i < srclen; j++) { 279 for (i = 0, j = 0; i < srclen; j++) {
279 src_char = source[i]; 280 src_char = source[i];
@@ -281,7 +282,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
281 switch (src_char) { 282 switch (src_char) {
282 case 0: 283 case 0:
283 put_unaligned(0, &target[j]); 284 put_unaligned(0, &target[j]);
284 goto ctoUCS_out; 285 goto ctoUTF16_out;
285 case ':': 286 case ':':
286 dst_char = cpu_to_le16(UNI_COLON); 287 dst_char = cpu_to_le16(UNI_COLON);
287 break; 288 break;
@@ -326,7 +327,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
326 put_unaligned(dst_char, &target[j]); 327 put_unaligned(dst_char, &target[j]);
327 } 328 }
328 329
329ctoUCS_out: 330ctoUTF16_out:
330 return i; 331 return i;
331} 332}
332 333
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 6d02fd560566..a513a546700b 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -74,16 +74,16 @@ extern const struct UniCaseRange CifsUniLowerRange[];
74#endif /* UNIUPR_NOLOWER */ 74#endif /* UNIUPR_NOLOWER */
75 75
76#ifdef __KERNEL__ 76#ifdef __KERNEL__
77int cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen, 77int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
78 const struct nls_table *codepage, bool mapchar); 78 const struct nls_table *codepage, bool mapchar);
79int cifs_ucs2_bytes(const __le16 *from, int maxbytes, 79int cifs_utf16_bytes(const __le16 *from, int maxbytes,
80 const struct nls_table *codepage); 80 const struct nls_table *codepage);
81int cifs_strtoUCS(__le16 *, const char *, int, const struct nls_table *); 81int cifs_strtoUTF16(__le16 *, const char *, int, const struct nls_table *);
82char *cifs_strndup_from_ucs(const char *src, const int maxlen, 82char *cifs_strndup_from_utf16(const char *src, const int maxlen,
83 const bool is_unicode, 83 const bool is_unicode,
84 const struct nls_table *codepage); 84 const struct nls_table *codepage);
85extern int cifsConvertToUCS(__le16 *target, const char *source, int maxlen, 85extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen,
86 const struct nls_table *cp, int mapChars); 86 const struct nls_table *cp, int mapChars);
87 87
88#endif 88#endif
89 89
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 72ddf23ef6f7..c1b254487388 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -909,6 +909,8 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
909 umode_t group_mask = S_IRWXG; 909 umode_t group_mask = S_IRWXG;
910 umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO; 910 umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
911 911
912 if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
913 return;
912 ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), 914 ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
913 GFP_KERNEL); 915 GFP_KERNEL);
914 if (!ppace) { 916 if (!ppace) {
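The cifsacl.c hunk rejects a num_aces value large enough to overflow the num_aces * sizeof(struct cifs_ace *) multiplication before it reaches kmalloc(); without the check, an attacker-supplied ACL could wrap the size down to a small allocation and overflow it while the ACEs are parsed. The guard as added above, with a clarifying comment:

	if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
		return;				/* n * size would wrap around      */
	ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), GFP_KERNEL);

Newer kernels also provide kmalloc_array(), which performs the same overflow check inside the allocator and returns NULL on wrap.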
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 5d9b9acc5fce..63c460e503b6 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -327,7 +327,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
327 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME); 327 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
328 attrptr->length = cpu_to_le16(2 * dlen); 328 attrptr->length = cpu_to_le16(2 * dlen);
329 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); 329 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
330 cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp); 330 cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
331 331
332 return 0; 332 return 0;
333} 333}
@@ -376,7 +376,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
376 kmalloc(attrsize + 1, GFP_KERNEL); 376 kmalloc(attrsize + 1, GFP_KERNEL);
377 if (!ses->domainName) 377 if (!ses->domainName)
378 return -ENOMEM; 378 return -ENOMEM;
379 cifs_from_ucs2(ses->domainName, 379 cifs_from_utf16(ses->domainName,
380 (__le16 *)blobptr, attrsize, attrsize, 380 (__le16 *)blobptr, attrsize, attrsize,
381 nls_cp, false); 381 nls_cp, false);
382 break; 382 break;
@@ -420,15 +420,20 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
420 } 420 }
421 421
422 /* convert ses->user_name to unicode and uppercase */ 422 /* convert ses->user_name to unicode and uppercase */
423 len = strlen(ses->user_name); 423 len = ses->user_name ? strlen(ses->user_name) : 0;
424 user = kmalloc(2 + (len * 2), GFP_KERNEL); 424 user = kmalloc(2 + (len * 2), GFP_KERNEL);
425 if (user == NULL) { 425 if (user == NULL) {
426 cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); 426 cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
427 rc = -ENOMEM; 427 rc = -ENOMEM;
428 return rc; 428 return rc;
429 } 429 }
430 len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp); 430
431 UniStrupr(user); 431 if (len) {
432 len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp);
433 UniStrupr(user);
434 } else {
435 memset(user, '\0', 2);
436 }
432 437
433 rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, 438 rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
434 (char *)user, 2 * len); 439 (char *)user, 2 * len);
@@ -448,8 +453,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
448 rc = -ENOMEM; 453 rc = -ENOMEM;
449 return rc; 454 return rc;
450 } 455 }
451 len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len, 456 len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
452 nls_cp); 457 nls_cp);
453 rc = 458 rc =
454 crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, 459 crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
455 (char *)domain, 2 * len); 460 (char *)domain, 2 * len);
@@ -468,7 +473,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
468 rc = -ENOMEM; 473 rc = -ENOMEM;
469 return rc; 474 return rc;
470 } 475 }
471 len = cifs_strtoUCS((__le16 *)server, ses->serverName, len, 476 len = cifs_strtoUTF16((__le16 *)server, ses->serverName, len,
472 nls_cp); 477 nls_cp);
473 rc = 478 rc =
474 crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, 479 crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ba53c1c6c6cc..76e7d8b6da17 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -879,6 +879,8 @@ require use of the stronger protocol */
879#define CIFSSEC_MASK 0xB70B7 /* current flags supported if weak */ 879#define CIFSSEC_MASK 0xB70B7 /* current flags supported if weak */
880#endif /* UPCALL */ 880#endif /* UPCALL */
881#else /* do not allow weak pw hash */ 881#else /* do not allow weak pw hash */
882#define CIFSSEC_MUST_LANMAN 0
883#define CIFSSEC_MUST_PLNTXT 0
882#ifdef CONFIG_CIFS_UPCALL 884#ifdef CONFIG_CIFS_UPCALL
883#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ 885#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */
884#else 886#else
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6600aa2d2ef3..8b7794c31591 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -821,8 +821,8 @@ PsxDelete:
821 821
822 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 822 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
823 name_len = 823 name_len =
824 cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 824 cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
825 PATH_MAX, nls_codepage, remap); 825 PATH_MAX, nls_codepage, remap);
826 name_len++; /* trailing null */ 826 name_len++; /* trailing null */
827 name_len *= 2; 827 name_len *= 2;
828 } else { /* BB add path length overrun check */ 828 } else { /* BB add path length overrun check */
@@ -893,8 +893,8 @@ DelFileRetry:
893 893
894 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 894 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
895 name_len = 895 name_len =
896 cifsConvertToUCS((__le16 *) pSMB->fileName, fileName, 896 cifsConvertToUTF16((__le16 *) pSMB->fileName, fileName,
897 PATH_MAX, nls_codepage, remap); 897 PATH_MAX, nls_codepage, remap);
898 name_len++; /* trailing null */ 898 name_len++; /* trailing null */
899 name_len *= 2; 899 name_len *= 2;
900 } else { /* BB improve check for buffer overruns BB */ 900 } else { /* BB improve check for buffer overruns BB */
@@ -938,8 +938,8 @@ RmDirRetry:
938 return rc; 938 return rc;
939 939
940 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 940 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
941 name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, dirName, 941 name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, dirName,
942 PATH_MAX, nls_codepage, remap); 942 PATH_MAX, nls_codepage, remap);
943 name_len++; /* trailing null */ 943 name_len++; /* trailing null */
944 name_len *= 2; 944 name_len *= 2;
945 } else { /* BB improve check for buffer overruns BB */ 945 } else { /* BB improve check for buffer overruns BB */
@@ -981,8 +981,8 @@ MkDirRetry:
981 return rc; 981 return rc;
982 982
983 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 983 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
984 name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name, 984 name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
985 PATH_MAX, nls_codepage, remap); 985 PATH_MAX, nls_codepage, remap);
986 name_len++; /* trailing null */ 986 name_len++; /* trailing null */
987 name_len *= 2; 987 name_len *= 2;
988 } else { /* BB improve check for buffer overruns BB */ 988 } else { /* BB improve check for buffer overruns BB */
@@ -1030,8 +1030,8 @@ PsxCreat:
1030 1030
1031 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 1031 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
1032 name_len = 1032 name_len =
1033 cifsConvertToUCS((__le16 *) pSMB->FileName, name, 1033 cifsConvertToUTF16((__le16 *) pSMB->FileName, name,
1034 PATH_MAX, nls_codepage, remap); 1034 PATH_MAX, nls_codepage, remap);
1035 name_len++; /* trailing null */ 1035 name_len++; /* trailing null */
1036 name_len *= 2; 1036 name_len *= 2;
1037 } else { /* BB improve the check for buffer overruns BB */ 1037 } else { /* BB improve the check for buffer overruns BB */
@@ -1197,8 +1197,8 @@ OldOpenRetry:
1197 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 1197 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
1198 count = 1; /* account for one byte pad to word boundary */ 1198 count = 1; /* account for one byte pad to word boundary */
1199 name_len = 1199 name_len =
1200 cifsConvertToUCS((__le16 *) (pSMB->fileName + 1), 1200 cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
1201 fileName, PATH_MAX, nls_codepage, remap); 1201 fileName, PATH_MAX, nls_codepage, remap);
1202 name_len++; /* trailing null */ 1202 name_len++; /* trailing null */
1203 name_len *= 2; 1203 name_len *= 2;
1204 } else { /* BB improve check for buffer overruns BB */ 1204 } else { /* BB improve check for buffer overruns BB */
@@ -1304,8 +1304,8 @@ openRetry:
1304 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 1304 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
1305 count = 1; /* account for one byte pad to word boundary */ 1305 count = 1; /* account for one byte pad to word boundary */
1306 name_len = 1306 name_len =
1307 cifsConvertToUCS((__le16 *) (pSMB->fileName + 1), 1307 cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
1308 fileName, PATH_MAX, nls_codepage, remap); 1308 fileName, PATH_MAX, nls_codepage, remap);
1309 name_len++; /* trailing null */ 1309 name_len++; /* trailing null */
1310 name_len *= 2; 1310 name_len *= 2;
1311 pSMB->NameLength = cpu_to_le16(name_len); 1311 pSMB->NameLength = cpu_to_le16(name_len);
@@ -2649,16 +2649,16 @@ renameRetry:
2649 2649
2650 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2650 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2651 name_len = 2651 name_len =
2652 cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, 2652 cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName,
2653 PATH_MAX, nls_codepage, remap); 2653 PATH_MAX, nls_codepage, remap);
2654 name_len++; /* trailing null */ 2654 name_len++; /* trailing null */
2655 name_len *= 2; 2655 name_len *= 2;
2656 pSMB->OldFileName[name_len] = 0x04; /* pad */ 2656 pSMB->OldFileName[name_len] = 0x04; /* pad */
2657 /* protocol requires ASCII signature byte on Unicode string */ 2657 /* protocol requires ASCII signature byte on Unicode string */
2658 pSMB->OldFileName[name_len + 1] = 0x00; 2658 pSMB->OldFileName[name_len + 1] = 0x00;
2659 name_len2 = 2659 name_len2 =
2660 cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], 2660 cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
2661 toName, PATH_MAX, nls_codepage, remap); 2661 toName, PATH_MAX, nls_codepage, remap);
2662 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2662 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2663 name_len2 *= 2; /* convert to bytes */ 2663 name_len2 *= 2; /* convert to bytes */
2664 } else { /* BB improve the check for buffer overruns BB */ 2664 } else { /* BB improve the check for buffer overruns BB */
@@ -2738,10 +2738,12 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
2738 /* unicode only call */ 2738 /* unicode only call */
2739 if (target_name == NULL) { 2739 if (target_name == NULL) {
2740 sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid); 2740 sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
2741 len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name, 2741 len_of_str =
2742 cifsConvertToUTF16((__le16 *)rename_info->target_name,
2742 dummy_string, 24, nls_codepage, remap); 2743 dummy_string, 24, nls_codepage, remap);
2743 } else { 2744 } else {
2744 len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name, 2745 len_of_str =
2746 cifsConvertToUTF16((__le16 *)rename_info->target_name,
2745 target_name, PATH_MAX, nls_codepage, 2747 target_name, PATH_MAX, nls_codepage,
2746 remap); 2748 remap);
2747 } 2749 }
@@ -2795,17 +2797,17 @@ copyRetry:
2795 pSMB->Flags = cpu_to_le16(flags & COPY_TREE); 2797 pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
2796 2798
2797 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2799 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2798 name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName, 2800 name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
2799 fromName, PATH_MAX, nls_codepage, 2801 fromName, PATH_MAX, nls_codepage,
2800 remap); 2802 remap);
2801 name_len++; /* trailing null */ 2803 name_len++; /* trailing null */
2802 name_len *= 2; 2804 name_len *= 2;
2803 pSMB->OldFileName[name_len] = 0x04; /* pad */ 2805 pSMB->OldFileName[name_len] = 0x04; /* pad */
2804 /* protocol requires ASCII signature byte on Unicode string */ 2806 /* protocol requires ASCII signature byte on Unicode string */
2805 pSMB->OldFileName[name_len + 1] = 0x00; 2807 pSMB->OldFileName[name_len + 1] = 0x00;
2806 name_len2 = 2808 name_len2 =
2807 cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], 2809 cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
2808 toName, PATH_MAX, nls_codepage, remap); 2810 toName, PATH_MAX, nls_codepage, remap);
2809 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2811 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2810 name_len2 *= 2; /* convert to bytes */ 2812 name_len2 *= 2; /* convert to bytes */
2811 } else { /* BB improve the check for buffer overruns BB */ 2813 } else { /* BB improve the check for buffer overruns BB */
@@ -2861,9 +2863,9 @@ createSymLinkRetry:
2861 2863
2862 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2864 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2863 name_len = 2865 name_len =
2864 cifs_strtoUCS((__le16 *) pSMB->FileName, fromName, PATH_MAX 2866 cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName,
2865 /* find define for this maxpathcomponent */ 2867 /* find define for this maxpathcomponent */
2866 , nls_codepage); 2868 PATH_MAX, nls_codepage);
2867 name_len++; /* trailing null */ 2869 name_len++; /* trailing null */
2868 name_len *= 2; 2870 name_len *= 2;
2869 2871
@@ -2885,9 +2887,9 @@ createSymLinkRetry:
2885 data_offset = (char *) (&pSMB->hdr.Protocol) + offset; 2887 data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
2886 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2888 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2887 name_len_target = 2889 name_len_target =
2888 cifs_strtoUCS((__le16 *) data_offset, toName, PATH_MAX 2890 cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX
2889 /* find define for this maxpathcomponent */ 2891 /* find define for this maxpathcomponent */
2890 , nls_codepage); 2892 , nls_codepage);
2891 name_len_target++; /* trailing null */ 2893 name_len_target++; /* trailing null */
2892 name_len_target *= 2; 2894 name_len_target *= 2;
2893 } else { /* BB improve the check for buffer overruns BB */ 2895 } else { /* BB improve the check for buffer overruns BB */
@@ -2949,8 +2951,8 @@ createHardLinkRetry:
2949 return rc; 2951 return rc;
2950 2952
2951 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2953 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2952 name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, toName, 2954 name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName,
2953 PATH_MAX, nls_codepage, remap); 2955 PATH_MAX, nls_codepage, remap);
2954 name_len++; /* trailing null */ 2956 name_len++; /* trailing null */
2955 name_len *= 2; 2957 name_len *= 2;
2956 2958
@@ -2972,8 +2974,8 @@ createHardLinkRetry:
2972 data_offset = (char *) (&pSMB->hdr.Protocol) + offset; 2974 data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
2973 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 2975 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
2974 name_len_target = 2976 name_len_target =
2975 cifsConvertToUCS((__le16 *) data_offset, fromName, PATH_MAX, 2977 cifsConvertToUTF16((__le16 *) data_offset, fromName,
2976 nls_codepage, remap); 2978 PATH_MAX, nls_codepage, remap);
2977 name_len_target++; /* trailing null */ 2979 name_len_target++; /* trailing null */
2978 name_len_target *= 2; 2980 name_len_target *= 2;
2979 } else { /* BB improve the check for buffer overruns BB */ 2981 } else { /* BB improve the check for buffer overruns BB */
@@ -3042,8 +3044,8 @@ winCreateHardLinkRetry:
3042 3044
3043 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 3045 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
3044 name_len = 3046 name_len =
3045 cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName, 3047 cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName,
3046 PATH_MAX, nls_codepage, remap); 3048 PATH_MAX, nls_codepage, remap);
3047 name_len++; /* trailing null */ 3049 name_len++; /* trailing null */
3048 name_len *= 2; 3050 name_len *= 2;
3049 3051
@@ -3051,8 +3053,8 @@ winCreateHardLinkRetry:
3051 pSMB->OldFileName[name_len] = 0x04; 3053 pSMB->OldFileName[name_len] = 0x04;
3052 pSMB->OldFileName[name_len + 1] = 0x00; /* pad */ 3054 pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
3053 name_len2 = 3055 name_len2 =
3054 cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2], 3056 cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
3055 toName, PATH_MAX, nls_codepage, remap); 3057 toName, PATH_MAX, nls_codepage, remap);
3056 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 3058 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
3057 name_len2 *= 2; /* convert to bytes */ 3059 name_len2 *= 2; /* convert to bytes */
3058 } else { /* BB improve the check for buffer overruns BB */ 3060 } else { /* BB improve the check for buffer overruns BB */
@@ -3108,8 +3110,8 @@ querySymLinkRetry:
3108 3110
3109 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 3111 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
3110 name_len = 3112 name_len =
3111 cifs_strtoUCS((__le16 *) pSMB->FileName, searchName, 3113 cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName,
3112 PATH_MAX, nls_codepage); 3114 PATH_MAX, nls_codepage);
3113 name_len++; /* trailing null */ 3115 name_len++; /* trailing null */
3114 name_len *= 2; 3116 name_len *= 2;
3115 } else { /* BB improve the check for buffer overruns BB */ 3117 } else { /* BB improve the check for buffer overruns BB */
@@ -3166,8 +3168,8 @@ querySymLinkRetry:
3166 is_unicode = false; 3168 is_unicode = false;
3167 3169
3168 /* BB FIXME investigate remapping reserved chars here */ 3170 /* BB FIXME investigate remapping reserved chars here */
3169 *symlinkinfo = cifs_strndup_from_ucs(data_start, count, 3171 *symlinkinfo = cifs_strndup_from_utf16(data_start,
3170 is_unicode, nls_codepage); 3172 count, is_unicode, nls_codepage);
3171 if (!*symlinkinfo) 3173 if (!*symlinkinfo)
3172 rc = -ENOMEM; 3174 rc = -ENOMEM;
3173 } 3175 }
@@ -3450,8 +3452,9 @@ queryAclRetry:
3450 3452
3451 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 3453 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
3452 name_len = 3454 name_len =
3453 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 3455 cifsConvertToUTF16((__le16 *) pSMB->FileName,
3454 PATH_MAX, nls_codepage, remap); 3456 searchName, PATH_MAX, nls_codepage,
3457 remap);
3455 name_len++; /* trailing null */ 3458 name_len++; /* trailing null */
3456 name_len *= 2; 3459 name_len *= 2;
3457 pSMB->FileName[name_len] = 0; 3460 pSMB->FileName[name_len] = 0;
@@ -3537,8 +3540,8 @@ setAclRetry:
3537 return rc; 3540 return rc;
3538 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 3541 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
3539 name_len = 3542 name_len =
3540 cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 3543 cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
3541 PATH_MAX, nls_codepage, remap); 3544 PATH_MAX, nls_codepage, remap);
3542 name_len++; /* trailing null */ 3545 name_len++; /* trailing null */
3543 name_len *= 2; 3546 name_len *= 2;
3544 } else { /* BB improve the check for buffer overruns BB */ 3547 } else { /* BB improve the check for buffer overruns BB */
@@ -3948,8 +3951,9 @@ QInfRetry:
3948 3951
3949 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 3952 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
3950 name_len = 3953 name_len =
3951 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 3954 cifsConvertToUTF16((__le16 *) pSMB->FileName,
3952 PATH_MAX, nls_codepage, remap); 3955 searchName, PATH_MAX, nls_codepage,
3956 remap);
3953 name_len++; /* trailing null */ 3957 name_len++; /* trailing null */
3954 name_len *= 2; 3958 name_len *= 2;
3955 } else { 3959 } else {
@@ -4086,8 +4090,8 @@ QPathInfoRetry:
4086 4090
4087 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 4091 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
4088 name_len = 4092 name_len =
4089 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 4093 cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
4090 PATH_MAX, nls_codepage, remap); 4094 PATH_MAX, nls_codepage, remap);
4091 name_len++; /* trailing null */ 4095 name_len++; /* trailing null */
4092 name_len *= 2; 4096 name_len *= 2;
4093 } else { /* BB improve the check for buffer overruns BB */ 4097 } else { /* BB improve the check for buffer overruns BB */
@@ -4255,8 +4259,8 @@ UnixQPathInfoRetry:
4255 4259
4256 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 4260 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
4257 name_len = 4261 name_len =
4258 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 4262 cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
4259 PATH_MAX, nls_codepage, remap); 4263 PATH_MAX, nls_codepage, remap);
4260 name_len++; /* trailing null */ 4264 name_len++; /* trailing null */
4261 name_len *= 2; 4265 name_len *= 2;
4262 } else { /* BB improve the check for buffer overruns BB */ 4266 } else { /* BB improve the check for buffer overruns BB */
@@ -4344,8 +4348,8 @@ findFirstRetry:
4344 4348
4345 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 4349 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
4346 name_len = 4350 name_len =
4347 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 4351 cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
4348 PATH_MAX, nls_codepage, remap); 4352 PATH_MAX, nls_codepage, remap);
4349 /* We can not add the asterik earlier in case 4353 /* We can not add the asterik earlier in case
4350 it got remapped to 0xF03A as if it were part of the 4354 it got remapped to 0xF03A as if it were part of the
4351 directory name instead of a wildcard */ 4355 directory name instead of a wildcard */
@@ -4656,8 +4660,9 @@ GetInodeNumberRetry:
4656 4660
4657 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 4661 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
4658 name_len = 4662 name_len =
4659 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 4663 cifsConvertToUTF16((__le16 *) pSMB->FileName,
4660 PATH_MAX, nls_codepage, remap); 4664 searchName, PATH_MAX, nls_codepage,
4665 remap);
4661 name_len++; /* trailing null */ 4666 name_len++; /* trailing null */
4662 name_len *= 2; 4667 name_len *= 2;
4663 } else { /* BB improve the check for buffer overruns BB */ 4668 } else { /* BB improve the check for buffer overruns BB */
@@ -4794,9 +4799,9 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
4794 rc = -ENOMEM; 4799 rc = -ENOMEM;
4795 goto parse_DFS_referrals_exit; 4800 goto parse_DFS_referrals_exit;
4796 } 4801 }
4797 cifsConvertToUCS((__le16 *) tmp, searchName, 4802 cifsConvertToUTF16((__le16 *) tmp, searchName,
4798 PATH_MAX, nls_codepage, remap); 4803 PATH_MAX, nls_codepage, remap);
4799 node->path_consumed = cifs_ucs2_bytes(tmp, 4804 node->path_consumed = cifs_utf16_bytes(tmp,
4800 le16_to_cpu(pSMBr->PathConsumed), 4805 le16_to_cpu(pSMBr->PathConsumed),
4801 nls_codepage); 4806 nls_codepage);
4802 kfree(tmp); 4807 kfree(tmp);
@@ -4809,8 +4814,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
4809 /* copy DfsPath */ 4814 /* copy DfsPath */
4810 temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset); 4815 temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
4811 max_len = data_end - temp; 4816 max_len = data_end - temp;
4812 node->path_name = cifs_strndup_from_ucs(temp, max_len, 4817 node->path_name = cifs_strndup_from_utf16(temp, max_len,
4813 is_unicode, nls_codepage); 4818 is_unicode, nls_codepage);
4814 if (!node->path_name) { 4819 if (!node->path_name) {
4815 rc = -ENOMEM; 4820 rc = -ENOMEM;
4816 goto parse_DFS_referrals_exit; 4821 goto parse_DFS_referrals_exit;
@@ -4819,8 +4824,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
4819 /* copy link target UNC */ 4824 /* copy link target UNC */
4820 temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset); 4825 temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
4821 max_len = data_end - temp; 4826 max_len = data_end - temp;
4822 node->node_name = cifs_strndup_from_ucs(temp, max_len, 4827 node->node_name = cifs_strndup_from_utf16(temp, max_len,
4823 is_unicode, nls_codepage); 4828 is_unicode, nls_codepage);
4824 if (!node->node_name) 4829 if (!node->node_name)
4825 rc = -ENOMEM; 4830 rc = -ENOMEM;
4826 } 4831 }
@@ -4873,8 +4878,9 @@ getDFSRetry:
4873 if (ses->capabilities & CAP_UNICODE) { 4878 if (ses->capabilities & CAP_UNICODE) {
4874 pSMB->hdr.Flags2 |= SMBFLG2_UNICODE; 4879 pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
4875 name_len = 4880 name_len =
4876 cifsConvertToUCS((__le16 *) pSMB->RequestFileName, 4881 cifsConvertToUTF16((__le16 *) pSMB->RequestFileName,
4877 searchName, PATH_MAX, nls_codepage, remap); 4882 searchName, PATH_MAX, nls_codepage,
4883 remap);
4878 name_len++; /* trailing null */ 4884 name_len++; /* trailing null */
4879 name_len *= 2; 4885 name_len *= 2;
4880 } else { /* BB improve the check for buffer overruns BB */ 4886 } else { /* BB improve the check for buffer overruns BB */
@@ -5506,8 +5512,8 @@ SetEOFRetry:
5506 5512
5507 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 5513 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
5508 name_len = 5514 name_len =
5509 cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 5515 cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
5510 PATH_MAX, nls_codepage, remap); 5516 PATH_MAX, nls_codepage, remap);
5511 name_len++; /* trailing null */ 5517 name_len++; /* trailing null */
5512 name_len *= 2; 5518 name_len *= 2;
5513 } else { /* BB improve the check for buffer overruns BB */ 5519 } else { /* BB improve the check for buffer overruns BB */
@@ -5796,8 +5802,8 @@ SetTimesRetry:
5796 5802
5797 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 5803 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
5798 name_len = 5804 name_len =
5799 cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 5805 cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
5800 PATH_MAX, nls_codepage, remap); 5806 PATH_MAX, nls_codepage, remap);
5801 name_len++; /* trailing null */ 5807 name_len++; /* trailing null */
5802 name_len *= 2; 5808 name_len *= 2;
5803 } else { /* BB improve the check for buffer overruns BB */ 5809 } else { /* BB improve the check for buffer overruns BB */
@@ -5877,8 +5883,8 @@ SetAttrLgcyRetry:
5877 5883
5878 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 5884 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
5879 name_len = 5885 name_len =
5880 ConvertToUCS((__le16 *) pSMB->fileName, fileName, 5886 ConvertToUTF16((__le16 *) pSMB->fileName, fileName,
5881 PATH_MAX, nls_codepage); 5887 PATH_MAX, nls_codepage);
5882 name_len++; /* trailing null */ 5888 name_len++; /* trailing null */
5883 name_len *= 2; 5889 name_len *= 2;
5884 } else { /* BB improve the check for buffer overruns BB */ 5890 } else { /* BB improve the check for buffer overruns BB */
@@ -6030,8 +6036,8 @@ setPermsRetry:
6030 6036
6031 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 6037 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
6032 name_len = 6038 name_len =
6033 cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 6039 cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
6034 PATH_MAX, nls_codepage, remap); 6040 PATH_MAX, nls_codepage, remap);
6035 name_len++; /* trailing null */ 6041 name_len++; /* trailing null */
6036 name_len *= 2; 6042 name_len *= 2;
6037 } else { /* BB improve the check for buffer overruns BB */ 6043 } else { /* BB improve the check for buffer overruns BB */
@@ -6123,8 +6129,8 @@ QAllEAsRetry:
6123 6129
6124 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 6130 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
6125 list_len = 6131 list_len =
6126 cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, 6132 cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
6127 PATH_MAX, nls_codepage, remap); 6133 PATH_MAX, nls_codepage, remap);
6128 list_len++; /* trailing null */ 6134 list_len++; /* trailing null */
6129 list_len *= 2; 6135 list_len *= 2;
6130 } else { /* BB improve the check for buffer overruns BB */ 6136 } else { /* BB improve the check for buffer overruns BB */
@@ -6301,8 +6307,8 @@ SetEARetry:
6301 6307
6302 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { 6308 if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
6303 name_len = 6309 name_len =
6304 cifsConvertToUCS((__le16 *) pSMB->FileName, fileName, 6310 cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
6305 PATH_MAX, nls_codepage, remap); 6311 PATH_MAX, nls_codepage, remap);
6306 name_len++; /* trailing null */ 6312 name_len++; /* trailing null */
6307 name_len *= 2; 6313 name_len *= 2;
6308 } else { /* BB improve the check for buffer overruns BB */ 6314 } else { /* BB improve the check for buffer overruns BB */
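All of the cifssmb.c hunks follow the same shape: the converter is renamed (cifsConvertToUCS -> cifsConvertToUTF16, cifs_strtoUCS -> cifs_strtoUTF16) and a few calls are re-wrapped to fit the longer name, while the surrounding length arithmetic is untouched. That arithmetic is worth spelling out once, since every call site repeats it (sketch; dst stands for the pSMB->FileName-style destination buffer):

	name_len = cifsConvertToUTF16((__le16 *)dst, src, PATH_MAX,
				      nls_codepage, remap);
	name_len++;		/* include the trailing null character   */
	name_len *= 2;		/* UTF-16LE: two bytes per character     */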
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4666780f315d..986709a8d903 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -38,6 +38,7 @@
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <linux/inet.h> 39#include <linux/inet.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include <keys/user-type.h>
41#include <net/ipv6.h> 42#include <net/ipv6.h>
42#include "cifspdu.h" 43#include "cifspdu.h"
43#include "cifsglob.h" 44#include "cifsglob.h"
@@ -225,74 +226,90 @@ static int check2ndT2(struct smb_hdr *pSMB)
225 226
226static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) 227static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
227{ 228{
228 struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond; 229 struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)psecond;
229 struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB; 230 struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB;
230 char *data_area_of_target; 231 char *data_area_of_tgt;
231 char *data_area_of_buf2; 232 char *data_area_of_src;
232 int remaining; 233 int remaining;
233 unsigned int byte_count, total_in_buf; 234 unsigned int byte_count, total_in_tgt;
234 __u16 total_data_size, total_in_buf2; 235 __u16 tgt_total_cnt, src_total_cnt, total_in_src;
235 236
236 total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); 237 src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
238 tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
237 239
238 if (total_data_size != 240 if (tgt_total_cnt != src_total_cnt)
239 get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount)) 241 cFYI(1, "total data count of primary and secondary t2 differ "
240 cFYI(1, "total data size of primary and secondary t2 differ"); 242 "source=%hu target=%hu", src_total_cnt, tgt_total_cnt);
241 243
242 total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); 244 total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
243 245
244 remaining = total_data_size - total_in_buf; 246 remaining = tgt_total_cnt - total_in_tgt;
245 247
246 if (remaining < 0) 248 if (remaining < 0) {
249 cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
250 "total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
247 return -EPROTO; 251 return -EPROTO;
252 }
248 253
249 if (remaining == 0) /* nothing to do, ignore */ 254 if (remaining == 0) {
255 /* nothing to do, ignore */
256 cFYI(1, "no more data remains");
250 return 0; 257 return 0;
258 }
251 259
252 total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount); 260 total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
253 if (remaining < total_in_buf2) { 261 if (remaining < total_in_src)
254 cFYI(1, "transact2 2nd response contains too much data"); 262 cFYI(1, "transact2 2nd response contains too much data");
255 }
256 263
257 /* find end of first SMB data area */ 264 /* find end of first SMB data area */
258 data_area_of_target = (char *)&pSMBt->hdr.Protocol + 265 data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
259 get_unaligned_le16(&pSMBt->t2_rsp.DataOffset); 266 get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
260 /* validate target area */
261 267
262 data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol + 268 /* validate target area */
263 get_unaligned_le16(&pSMB2->t2_rsp.DataOffset); 269 data_area_of_src = (char *)&pSMBs->hdr.Protocol +
270 get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
264 271
265 data_area_of_target += total_in_buf; 272 data_area_of_tgt += total_in_tgt;
266 273
267 /* copy second buffer into end of first buffer */ 274 total_in_tgt += total_in_src;
268 total_in_buf += total_in_buf2;
269 /* is the result too big for the field? */ 275 /* is the result too big for the field? */
270 if (total_in_buf > USHRT_MAX) 276 if (total_in_tgt > USHRT_MAX) {
277 cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
271 return -EPROTO; 278 return -EPROTO;
272 put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); 279 }
280 put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
273 281
274 /* fix up the BCC */ 282 /* fix up the BCC */
275 byte_count = get_bcc(pTargetSMB); 283 byte_count = get_bcc(pTargetSMB);
276 byte_count += total_in_buf2; 284 byte_count += total_in_src;
277 /* is the result too big for the field? */ 285 /* is the result too big for the field? */
278 if (byte_count > USHRT_MAX) 286 if (byte_count > USHRT_MAX) {
287 cFYI(1, "coalesced BCC too large (%u)", byte_count);
279 return -EPROTO; 288 return -EPROTO;
289 }
280 put_bcc(byte_count, pTargetSMB); 290 put_bcc(byte_count, pTargetSMB);
281 291
282 byte_count = be32_to_cpu(pTargetSMB->smb_buf_length); 292 byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
283 byte_count += total_in_buf2; 293 byte_count += total_in_src;
284 /* don't allow buffer to overflow */ 294 /* don't allow buffer to overflow */
285 if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) 295 if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
296 cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
286 return -ENOBUFS; 297 return -ENOBUFS;
298 }
287 pTargetSMB->smb_buf_length = cpu_to_be32(byte_count); 299 pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
288 300
289 memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); 301 /* copy second buffer into end of first buffer */
302 memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
290 303
291 if (remaining == total_in_buf2) { 304 if (remaining != total_in_src) {
292 cFYI(1, "found the last secondary response"); 305 /* more responses to go */
293 return 0; /* we are done */ 306 cFYI(1, "waiting for more secondary responses");
294 } else /* more responses to go */
295 return 1; 307 return 1;
308 }
309
310 /* we are done */
311 cFYI(1, "found the last secondary response");
312 return 0;
296} 313}
297 314
298static void 315static void
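The rewritten coalesce_t2() above validates the secondary transact2 response on three axes before copying anything: the amount already received must not exceed what the server promised (TotalDataCount), the coalesced DataCount must still fit its 16-bit field, and the combined payload must fit the receive buffer. A minimal, self-contained sketch of that check order in plain C follows; the struct layout and buffer-size constant are illustrative, not the kernel's.

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define TGT_BUF_MAX (64 * 1024)         /* illustrative receive-buffer limit */

struct t2_rsp_sketch {
	uint16_t total_cnt;             /* TotalDataCount: what the server promises */
	uint16_t data_cnt;              /* DataCount: what this buffer already holds */
	uint8_t *data;                  /* start of the data area */
};

static int coalesce_sketch(struct t2_rsp_sketch *tgt,
			   const struct t2_rsp_sketch *src,
			   size_t tgt_bytes_used)
{
	int remaining = (int)tgt->total_cnt - (int)tgt->data_cnt;

	if (remaining < 0)              /* server already sent too much */
		return -EPROTO;
	if (remaining == 0)             /* nothing left to coalesce */
		return 0;

	/* coalesced count must still fit the 16-bit DataCount field */
	if ((unsigned int)tgt->data_cnt + src->data_cnt > UINT16_MAX)
		return -EPROTO;
	/* and the combined payload must fit inside the target buffer */
	if (tgt_bytes_used + src->data_cnt > TGT_BUF_MAX)
		return -ENOBUFS;

	memcpy(tgt->data + tgt->data_cnt, src->data, src->data_cnt);
	tgt->data_cnt += src->data_cnt;

	return (remaining != src->data_cnt) ? 1 : 0;  /* 1: more secondaries expected */
}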
@@ -1578,11 +1595,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1578 } 1595 }
1579 } 1596 }
1580 1597
1581 if (vol->multiuser && !(vol->secFlg & CIFSSEC_MAY_KRB5)) { 1598#ifndef CONFIG_KEYS
1582 cERROR(1, "Multiuser mounts currently require krb5 " 1599 /* Multiuser mounts require CONFIG_KEYS support */
1583 "authentication!"); 1600 if (vol->multiuser) {
1601 cERROR(1, "Multiuser mounts require kernels with "
1602 "CONFIG_KEYS enabled.");
1584 goto cifs_parse_mount_err; 1603 goto cifs_parse_mount_err;
1585 } 1604 }
1605#endif
1586 1606
1587 if (vol->UNCip == NULL) 1607 if (vol->UNCip == NULL)
1588 vol->UNCip = &vol->UNC[2]; 1608 vol->UNCip = &vol->UNC[2];
@@ -1981,10 +2001,16 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
1981 return 0; 2001 return 0;
1982 break; 2002 break;
1983 default: 2003 default:
2004 /* NULL username means anonymous session */
2005 if (ses->user_name == NULL) {
2006 if (!vol->nullauth)
2007 return 0;
2008 break;
2009 }
2010
1984 /* anything else takes username/password */ 2011 /* anything else takes username/password */
1985 if (ses->user_name == NULL) 2012 if (strncmp(ses->user_name,
1986 return 0; 2013 vol->username ? vol->username : "",
1987 if (strncmp(ses->user_name, vol->username,
1988 MAX_USERNAME_SIZE)) 2014 MAX_USERNAME_SIZE))
1989 return 0; 2015 return 0;
1990 if (strlen(vol->username) != 0 && 2016 if (strlen(vol->username) != 0 &&
@@ -2039,6 +2065,132 @@ cifs_put_smb_ses(struct cifs_ses *ses)
2039 cifs_put_tcp_session(server); 2065 cifs_put_tcp_session(server);
2040} 2066}
2041 2067
2068#ifdef CONFIG_KEYS
2069
2070/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
2071#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
2072
2073/* Populate username and pw fields from keyring if possible */
2074static int
2075cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2076{
2077 int rc = 0;
2078 char *desc, *delim, *payload;
2079 ssize_t len;
2080 struct key *key;
2081 struct TCP_Server_Info *server = ses->server;
2082 struct sockaddr_in *sa;
2083 struct sockaddr_in6 *sa6;
2084 struct user_key_payload *upayload;
2085
2086 desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
2087 if (!desc)
2088 return -ENOMEM;
2089
2090 /* try to find an address key first */
2091 switch (server->dstaddr.ss_family) {
2092 case AF_INET:
2093 sa = (struct sockaddr_in *)&server->dstaddr;
2094 sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
2095 break;
2096 case AF_INET6:
2097 sa6 = (struct sockaddr_in6 *)&server->dstaddr;
2098 sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
2099 break;
2100 default:
2101 cFYI(1, "Bad ss_family (%hu)", server->dstaddr.ss_family);
2102 rc = -EINVAL;
2103 goto out_err;
2104 }
2105
2106 cFYI(1, "%s: desc=%s", __func__, desc);
2107 key = request_key(&key_type_logon, desc, "");
2108 if (IS_ERR(key)) {
2109 if (!ses->domainName) {
2110 cFYI(1, "domainName is NULL");
2111 rc = PTR_ERR(key);
2112 goto out_err;
2113 }
2114
2115 /* didn't work, try to find a domain key */
2116 sprintf(desc, "cifs:d:%s", ses->domainName);
2117 cFYI(1, "%s: desc=%s", __func__, desc);
2118 key = request_key(&key_type_logon, desc, "");
2119 if (IS_ERR(key)) {
2120 rc = PTR_ERR(key);
2121 goto out_err;
2122 }
2123 }
2124
2125 down_read(&key->sem);
2126 upayload = key->payload.data;
2127 if (IS_ERR_OR_NULL(upayload)) {
2128 rc = PTR_ERR(key);
2129 goto out_key_put;
2130 }
2131
2132 /* find first : in payload */
2133 payload = (char *)upayload->data;
2134 delim = strnchr(payload, upayload->datalen, ':');
2135 cFYI(1, "payload=%s", payload);
2136 if (!delim) {
2137 cFYI(1, "Unable to find ':' in payload (datalen=%d)",
2138 upayload->datalen);
2139 rc = -EINVAL;
2140 goto out_key_put;
2141 }
2142
2143 len = delim - payload;
2144 if (len > MAX_USERNAME_SIZE || len <= 0) {
2145 cFYI(1, "Bad value from username search (len=%ld)", len);
2146 rc = -EINVAL;
2147 goto out_key_put;
2148 }
2149
2150 vol->username = kstrndup(payload, len, GFP_KERNEL);
2151 if (!vol->username) {
2152 cFYI(1, "Unable to allocate %ld bytes for username", len);
2153 rc = -ENOMEM;
2154 goto out_key_put;
2155 }
2156 cFYI(1, "%s: username=%s", __func__, vol->username);
2157
2158 len = key->datalen - (len + 1);
2159 if (len > MAX_PASSWORD_SIZE || len <= 0) {
2160 cFYI(1, "Bad len for password search (len=%ld)", len);
2161 rc = -EINVAL;
2162 kfree(vol->username);
2163 vol->username = NULL;
2164 goto out_key_put;
2165 }
2166
2167 ++delim;
2168 vol->password = kstrndup(delim, len, GFP_KERNEL);
2169 if (!vol->password) {
2170 cFYI(1, "Unable to allocate %ld bytes for password", len);
2171 rc = -ENOMEM;
2172 kfree(vol->username);
2173 vol->username = NULL;
2174 goto out_key_put;
2175 }
2176
2177out_key_put:
2178 up_read(&key->sem);
2179 key_put(key);
2180out_err:
2181 kfree(desc);
2182 cFYI(1, "%s: returning %d", __func__, rc);
2183 return rc;
2184}
2185#else /* ! CONFIG_KEYS */
2186static inline int
2187cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
2188 struct cifs_ses *ses __attribute__((unused)))
2189{
2190 return -ENOSYS;
2191}
2192#endif /* CONFIG_KEYS */
2193
2042static bool warned_on_ntlm; /* globals init to false automatically */ 2194static bool warned_on_ntlm; /* globals init to false automatically */
2043 2195
2044static struct cifs_ses * 2196static struct cifs_ses *
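The new cifs_set_cifscreds() above requests a "logon" key described as "cifs:a:<server address>" (falling back to "cifs:d:<domain>") and then splits its payload at the first ':' into username and password, copying each out with kstrndup(). A userspace-flavored sketch of just that payload-splitting step; the length limits here are illustrative stand-ins for the driver's own constants.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define USER_MAX 256                    /* illustrative, not MAX_USERNAME_SIZE */
#define PASS_MAX 512                    /* illustrative, not MAX_PASSWORD_SIZE */

static int split_creds_sketch(const char *payload, size_t datalen,
			      char **user, char **pass)
{
	const char *delim = memchr(payload, ':', datalen);
	size_t ulen, plen;

	if (!delim)
		return -EINVAL;         /* no separator in the key payload */

	ulen = (size_t)(delim - payload);
	plen = datalen - (ulen + 1);
	if (ulen == 0 || ulen > USER_MAX || plen == 0 || plen > PASS_MAX)
		return -EINVAL;

	*user = strndup(payload, ulen);
	*pass = strndup(delim + 1, plen);
	if (!*user || !*pass) {
		free(*user);
		free(*pass);
		return -ENOMEM;
	}
	return 0;
}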
@@ -2914,18 +3066,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2914#define CIFS_DEFAULT_IOSIZE (1024 * 1024) 3066#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
2915 3067
2916/* 3068/*
2917 * Windows only supports a max of 60k reads. Default to that when posix 3069 * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
2918 * extensions aren't in force. 3070 * those values when posix extensions aren't in force. In actuality here, we
3071 * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
3072 * to be ok with the extra byte even though Windows doesn't send writes that
3073 * are that large.
3074 *
3075 * Citation:
3076 *
3077 * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
2919 */ 3078 */
2920#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024) 3079#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
3080#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
2921 3081
2922static unsigned int 3082static unsigned int
2923cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) 3083cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
2924{ 3084{
2925 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 3085 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
2926 struct TCP_Server_Info *server = tcon->ses->server; 3086 struct TCP_Server_Info *server = tcon->ses->server;
2927 unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize : 3087 unsigned int wsize;
2928 CIFS_DEFAULT_IOSIZE; 3088
3089 /* start with specified wsize, or default */
3090 if (pvolume_info->wsize)
3091 wsize = pvolume_info->wsize;
3092 else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
3093 wsize = CIFS_DEFAULT_IOSIZE;
3094 else
3095 wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
2929 3096
2930 /* can server support 24-bit write sizes? (via UNIX extensions) */ 3097 /* can server support 24-bit write sizes? (via UNIX extensions) */
2931 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) 3098 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
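Paraphrasing the wsize default the hunk above introduces: an explicit wsize= mount option always wins; otherwise the 1 MB default applies only when the POSIX large-write capability was negotiated, and plain Windows-style servers fall back to 64 KiB. A compact restatement as an illustrative helper, not the kernel function itself:

/* Illustrative restatement of the default wsize selection. */
static unsigned int pick_default_wsize(unsigned int opt_wsize,
				       int unix_ext, int large_write_cap)
{
	if (opt_wsize)
		return opt_wsize;       /* explicit mount option wins */
	if (unix_ext && large_write_cap)
		return 1024 * 1024;     /* CIFS_DEFAULT_IOSIZE */
	return 65536;                   /* CIFS_DEFAULT_NON_POSIX_WSIZE */
}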
@@ -3136,10 +3303,9 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
3136 return -EINVAL; 3303 return -EINVAL;
3137 3304
3138 if (volume_info->nullauth) { 3305 if (volume_info->nullauth) {
3139 cFYI(1, "null user"); 3306 cFYI(1, "Anonymous login");
3140 volume_info->username = kzalloc(1, GFP_KERNEL); 3307 kfree(volume_info->username);
3141 if (volume_info->username == NULL) 3308 volume_info->username = NULL;
3142 return -ENOMEM;
3143 } else if (volume_info->username) { 3309 } else if (volume_info->username) {
3144 /* BB fixme parse for domain name here */ 3310 /* BB fixme parse for domain name here */
3145 cFYI(1, "Username: %s", volume_info->username); 3311 cFYI(1, "Username: %s", volume_info->username);
@@ -3478,7 +3644,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3478 if (ses->capabilities & CAP_UNICODE) { 3644 if (ses->capabilities & CAP_UNICODE) {
3479 smb_buffer->Flags2 |= SMBFLG2_UNICODE; 3645 smb_buffer->Flags2 |= SMBFLG2_UNICODE;
3480 length = 3646 length =
3481 cifs_strtoUCS((__le16 *) bcc_ptr, tree, 3647 cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
3482 6 /* max utf8 char length in bytes */ * 3648 6 /* max utf8 char length in bytes */ *
3483 (/* server len*/ + 256 /* share len */), nls_codepage); 3649 (/* server len*/ + 256 /* share len */), nls_codepage);
3484 bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ 3650 bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
@@ -3533,7 +3699,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3533 3699
3534 /* mostly informational -- no need to fail on error here */ 3700 /* mostly informational -- no need to fail on error here */
3535 kfree(tcon->nativeFileSystem); 3701 kfree(tcon->nativeFileSystem);
3536 tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr, 3702 tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
3537 bytes_left, is_unicode, 3703 bytes_left, is_unicode,
3538 nls_codepage); 3704 nls_codepage);
3539 3705
@@ -3657,16 +3823,38 @@ int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
3657 return rc; 3823 return rc;
3658} 3824}
3659 3825
3826static int
3827cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses)
3828{
3829 switch (ses->server->secType) {
3830 case Kerberos:
3831 vol->secFlg = CIFSSEC_MUST_KRB5;
3832 return 0;
3833 case NTLMv2:
3834 vol->secFlg = CIFSSEC_MUST_NTLMV2;
3835 break;
3836 case NTLM:
3837 vol->secFlg = CIFSSEC_MUST_NTLM;
3838 break;
3839 case RawNTLMSSP:
3840 vol->secFlg = CIFSSEC_MUST_NTLMSSP;
3841 break;
3842 case LANMAN:
3843 vol->secFlg = CIFSSEC_MUST_LANMAN;
3844 break;
3845 }
3846
3847 return cifs_set_cifscreds(vol, ses);
3848}
3849
3660static struct cifs_tcon * 3850static struct cifs_tcon *
3661cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) 3851cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3662{ 3852{
3853 int rc;
3663 struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); 3854 struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
3664 struct cifs_ses *ses; 3855 struct cifs_ses *ses;
3665 struct cifs_tcon *tcon = NULL; 3856 struct cifs_tcon *tcon = NULL;
3666 struct smb_vol *vol_info; 3857 struct smb_vol *vol_info;
3667 char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */
3668 /* We used to have this as MAX_USERNAME which is */
3669 /* way too big now (256 instead of 32) */
3670 3858
3671 vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL); 3859 vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
3672 if (vol_info == NULL) { 3860 if (vol_info == NULL) {
@@ -3674,8 +3862,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3674 goto out; 3862 goto out;
3675 } 3863 }
3676 3864
3677 snprintf(username, sizeof(username), "krb50x%x", fsuid);
3678 vol_info->username = username;
3679 vol_info->local_nls = cifs_sb->local_nls; 3865 vol_info->local_nls = cifs_sb->local_nls;
3680 vol_info->linux_uid = fsuid; 3866 vol_info->linux_uid = fsuid;
3681 vol_info->cred_uid = fsuid; 3867 vol_info->cred_uid = fsuid;
@@ -3685,8 +3871,11 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3685 vol_info->local_lease = master_tcon->local_lease; 3871 vol_info->local_lease = master_tcon->local_lease;
3686 vol_info->no_linux_ext = !master_tcon->unix_ext; 3872 vol_info->no_linux_ext = !master_tcon->unix_ext;
3687 3873
3688 /* FIXME: allow for other secFlg settings */ 3874 rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
3689 vol_info->secFlg = CIFSSEC_MUST_KRB5; 3875 if (rc) {
3876 tcon = ERR_PTR(rc);
3877 goto out;
3878 }
3690 3879
3691 /* get a reference for the same TCP session */ 3880 /* get a reference for the same TCP session */
3692 spin_lock(&cifs_tcp_ses_lock); 3881 spin_lock(&cifs_tcp_ses_lock);
@@ -3709,6 +3898,8 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
3709 if (ses->capabilities & CAP_UNIX) 3898 if (ses->capabilities & CAP_UNIX)
3710 reset_cifs_unix_caps(0, tcon, NULL, vol_info); 3899 reset_cifs_unix_caps(0, tcon, NULL, vol_info);
3711out: 3900out:
3901 kfree(vol_info->username);
3902 kfree(vol_info->password);
3712 kfree(vol_info); 3903 kfree(vol_info);
3713 3904
3714 return tcon; 3905 return tcon;
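With the changes above, per-user tcons no longer synthesize a "krb50x%x" username; credentials are pulled from the keyring via cifs_set_vol_auth()/cifs_set_cifscreds(). A hedged sketch of how a user session might seed such a key from userspace, assuming the keyutils library is available and using the description/payload form the kernel code above searches for:

/* Build with -lkeyutils. "cifs:d:<domain>" works the same way as "cifs:a:<addr>". */
#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *payload = "testuser:secretpw";
	key_serial_t key;

	key = add_key("logon", "cifs:a:192.168.1.50",
		      payload, strlen(payload), KEY_SPEC_SESSION_KEYRING);
	if (key == -1) {
		perror("add_key");
		return 1;
	}
	printf("added key %d\n", (int)key);
	return 0;
}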
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index a090bbe6ee29..e2bbc683e018 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -647,10 +647,11 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir,
647 647
648 name.name = scratch_buf; 648 name.name = scratch_buf;
649 name.len = 649 name.len =
650 cifs_from_ucs2((char *)name.name, (__le16 *)de.name, 650 cifs_from_utf16((char *)name.name, (__le16 *)de.name,
651 UNICODE_NAME_MAX, 651 UNICODE_NAME_MAX,
652 min(de.namelen, (size_t)max_len), nlt, 652 min_t(size_t, de.namelen,
653 cifs_sb->mnt_cifs_flags & 653 (size_t)max_len), nlt,
654 cifs_sb->mnt_cifs_flags &
654 CIFS_MOUNT_MAP_SPECIAL_CHR); 655 CIFS_MOUNT_MAP_SPECIAL_CHR);
655 name.len -= nls_nullsize(nlt); 656 name.len -= nls_nullsize(nlt);
656 } else { 657 } else {
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 4ec3ee9d72cc..d85efad5765f 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -167,16 +167,16 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
167 int bytes_ret = 0; 167 int bytes_ret = 0;
168 168
169 /* Copy OS version */ 169 /* Copy OS version */
170 bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32, 170 bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32,
171 nls_cp); 171 nls_cp);
172 bcc_ptr += 2 * bytes_ret; 172 bcc_ptr += 2 * bytes_ret;
173 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, init_utsname()->release, 173 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release,
174 32, nls_cp); 174 32, nls_cp);
175 bcc_ptr += 2 * bytes_ret; 175 bcc_ptr += 2 * bytes_ret;
176 bcc_ptr += 2; /* trailing null */ 176 bcc_ptr += 2; /* trailing null */
177 177
178 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS, 178 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
179 32, nls_cp); 179 32, nls_cp);
180 bcc_ptr += 2 * bytes_ret; 180 bcc_ptr += 2 * bytes_ret;
181 bcc_ptr += 2; /* trailing null */ 181 bcc_ptr += 2; /* trailing null */
182 182
@@ -197,8 +197,8 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
197 *(bcc_ptr+1) = 0; 197 *(bcc_ptr+1) = 0;
198 bytes_ret = 0; 198 bytes_ret = 0;
199 } else 199 } else
200 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName, 200 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
201 256, nls_cp); 201 256, nls_cp);
202 bcc_ptr += 2 * bytes_ret; 202 bcc_ptr += 2 * bytes_ret;
203 bcc_ptr += 2; /* account for null terminator */ 203 bcc_ptr += 2; /* account for null terminator */
204 204
@@ -226,8 +226,8 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
226 *bcc_ptr = 0; 226 *bcc_ptr = 0;
227 *(bcc_ptr+1) = 0; 227 *(bcc_ptr+1) = 0;
228 } else { 228 } else {
229 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name, 229 bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name,
230 MAX_USERNAME_SIZE, nls_cp); 230 MAX_USERNAME_SIZE, nls_cp);
231 } 231 }
232 bcc_ptr += 2 * bytes_ret; 232 bcc_ptr += 2 * bytes_ret;
233 bcc_ptr += 2; /* account for null termination */ 233 bcc_ptr += 2; /* account for null termination */
@@ -287,7 +287,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
287 cFYI(1, "bleft %d", bleft); 287 cFYI(1, "bleft %d", bleft);
288 288
289 kfree(ses->serverOS); 289 kfree(ses->serverOS);
290 ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); 290 ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
291 cFYI(1, "serverOS=%s", ses->serverOS); 291 cFYI(1, "serverOS=%s", ses->serverOS);
292 len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; 292 len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
293 data += len; 293 data += len;
@@ -296,7 +296,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
296 return; 296 return;
297 297
298 kfree(ses->serverNOS); 298 kfree(ses->serverNOS);
299 ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); 299 ses->serverNOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
300 cFYI(1, "serverNOS=%s", ses->serverNOS); 300 cFYI(1, "serverNOS=%s", ses->serverNOS);
301 len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; 301 len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
302 data += len; 302 data += len;
@@ -305,7 +305,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
305 return; 305 return;
306 306
307 kfree(ses->serverDomain); 307 kfree(ses->serverDomain);
308 ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp); 308 ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
309 cFYI(1, "serverDomain=%s", ses->serverDomain); 309 cFYI(1, "serverDomain=%s", ses->serverDomain);
310 310
311 return; 311 return;
@@ -502,8 +502,8 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
502 tmp += 2; 502 tmp += 2;
503 } else { 503 } else {
504 int len; 504 int len;
505 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, 505 len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
506 MAX_USERNAME_SIZE, nls_cp); 506 MAX_USERNAME_SIZE, nls_cp);
507 len *= 2; /* unicode is 2 bytes each */ 507 len *= 2; /* unicode is 2 bytes each */
508 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 508 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
509 sec_blob->DomainName.Length = cpu_to_le16(len); 509 sec_blob->DomainName.Length = cpu_to_le16(len);
@@ -518,8 +518,8 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
518 tmp += 2; 518 tmp += 2;
519 } else { 519 } else {
520 int len; 520 int len;
521 len = cifs_strtoUCS((__le16 *)tmp, ses->user_name, 521 len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
522 MAX_USERNAME_SIZE, nls_cp); 522 MAX_USERNAME_SIZE, nls_cp);
523 len *= 2; /* unicode is 2 bytes each */ 523 len *= 2; /* unicode is 2 bytes each */
524 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); 524 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
525 sec_blob->UserName.Length = cpu_to_le16(len); 525 sec_blob->UserName.Length = cpu_to_le16(len);
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 80d850881938..d5cd9aa7eacc 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -213,7 +213,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
213 213
214 /* Password cannot be longer than 128 characters */ 214 /* Password cannot be longer than 128 characters */
215 if (passwd) /* Password must be converted to NT unicode */ 215 if (passwd) /* Password must be converted to NT unicode */
216 len = cifs_strtoUCS(wpwd, passwd, 128, codepage); 216 len = cifs_strtoUTF16(wpwd, passwd, 128, codepage);
217 else { 217 else {
218 len = 0; 218 len = 0;
219 *wpwd = 0; /* Ensure string is null terminated */ 219 *wpwd = 0; /* Ensure string is null terminated */
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index f65d4455c5e5..ef023eef0464 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -540,7 +540,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_blob);
540 * debugfs_print_regs32 - use seq_print to describe a set of registers 540 * debugfs_print_regs32 - use seq_print to describe a set of registers
541 * @s: the seq_file structure being used to generate output 541 * @s: the seq_file structure being used to generate output
542 * @regs: an array if struct debugfs_reg32 structures 542 * @regs: an array if struct debugfs_reg32 structures
543 * @mregs: the length of the above array 543 * @nregs: the length of the above array
544 * @base: the base address to be used in reading the registers 544 * @base: the base address to be used in reading the registers
545 * @prefix: a string to be prefixed to every output line 545 * @prefix: a string to be prefixed to every output line
546 * 546 *
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 1089f760c847..2de655f5d625 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -77,10 +77,11 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
77 flags = flags & EXT2_FL_USER_MODIFIABLE; 77 flags = flags & EXT2_FL_USER_MODIFIABLE;
78 flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE; 78 flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
79 ei->i_flags = flags; 79 ei->i_flags = flags;
80 mutex_unlock(&inode->i_mutex);
81 80
82 ext2_set_inode_flags(inode); 81 ext2_set_inode_flags(inode);
83 inode->i_ctime = CURRENT_TIME_SEC; 82 inode->i_ctime = CURRENT_TIME_SEC;
83 mutex_unlock(&inode->i_mutex);
84
84 mark_inode_dirty(inode); 85 mark_inode_dirty(inode);
85setflags_out: 86setflags_out:
86 mnt_drop_write_file(filp); 87 mnt_drop_write_file(filp);
@@ -88,20 +89,29 @@ setflags_out:
88 } 89 }
89 case EXT2_IOC_GETVERSION: 90 case EXT2_IOC_GETVERSION:
90 return put_user(inode->i_generation, (int __user *) arg); 91 return put_user(inode->i_generation, (int __user *) arg);
91 case EXT2_IOC_SETVERSION: 92 case EXT2_IOC_SETVERSION: {
93 __u32 generation;
94
92 if (!inode_owner_or_capable(inode)) 95 if (!inode_owner_or_capable(inode))
93 return -EPERM; 96 return -EPERM;
94 ret = mnt_want_write_file(filp); 97 ret = mnt_want_write_file(filp);
95 if (ret) 98 if (ret)
96 return ret; 99 return ret;
97 if (get_user(inode->i_generation, (int __user *) arg)) { 100 if (get_user(generation, (int __user *) arg)) {
98 ret = -EFAULT; 101 ret = -EFAULT;
99 } else { 102 goto setversion_out;
100 inode->i_ctime = CURRENT_TIME_SEC;
101 mark_inode_dirty(inode);
102 } 103 }
104
105 mutex_lock(&inode->i_mutex);
106 inode->i_ctime = CURRENT_TIME_SEC;
107 inode->i_generation = generation;
108 mutex_unlock(&inode->i_mutex);
109
110 mark_inode_dirty(inode);
111setversion_out:
103 mnt_drop_write_file(filp); 112 mnt_drop_write_file(filp);
104 return ret; 113 return ret;
114 }
105 case EXT2_IOC_GETRSVSZ: 115 case EXT2_IOC_GETRSVSZ:
106 if (test_opt(inode->i_sb, RESERVATION) 116 if (test_opt(inode->i_sb, RESERVATION)
107 && S_ISREG(inode->i_mode) 117 && S_ISREG(inode->i_mode)
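The EXT2_IOC_SETVERSION change above copies the new value with get_user() into a local before taking i_mutex, then updates i_generation and i_ctime together under the lock, so a faulting copy can no longer leave the inode half-updated. A userspace analog of that ordering, using pthreads and illustrative names:

#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <time.h>

struct obj_sketch {
	pthread_mutex_t lock;
	unsigned int generation;
	time_t ctime;
};

static int set_generation_sketch(struct obj_sketch *o,
				 const void *user_buf, size_t len)
{
	unsigned int generation;

	if (len != sizeof(generation))
		return -EINVAL;
	memcpy(&generation, user_buf, sizeof(generation)); /* the "get_user" step */

	pthread_mutex_lock(&o->lock);   /* both fields change together... */
	o->ctime = time(NULL);
	o->generation = generation;
	pthread_mutex_unlock(&o->lock); /* ...with respect to other lockers */
	return 0;
}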
diff --git a/fs/inode.c b/fs/inode.c
index 4fa4f0916af9..fb10d86ffad7 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -322,9 +322,6 @@ EXPORT_SYMBOL(clear_nlink);
322void set_nlink(struct inode *inode, unsigned int nlink) 322void set_nlink(struct inode *inode, unsigned int nlink)
323{ 323{
324 if (!nlink) { 324 if (!nlink) {
325 printk_ratelimited(KERN_INFO
326 "set_nlink() clearing i_nlink on %s inode %li\n",
327 inode->i_sb->s_type->name, inode->i_ino);
328 clear_nlink(inode); 325 clear_nlink(inode);
329 } else { 326 } else {
330 /* Yes, some filesystems do change nlink from zero to one */ 327 /* Yes, some filesystems do change nlink from zero to one */
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 5d1a00a5041b..05f0754f2b46 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -453,8 +453,6 @@ out:
453 * 453 *
454 * Return <0 on error, 0 on success, 1 if there was nothing to clean up. 454 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
455 * 455 *
456 * Called with the journal lock held.
457 *
458 * This is the only part of the journaling code which really needs to be 456 * This is the only part of the journaling code which really needs to be
459 * aware of transaction aborts. Checkpointing involves writing to the 457 * aware of transaction aborts. Checkpointing involves writing to the
460 * main filesystem area rather than to the journal, so it can proceed 458 * main filesystem area rather than to the journal, so it can proceed
@@ -472,13 +470,14 @@ int cleanup_journal_tail(journal_t *journal)
472 if (is_journal_aborted(journal)) 470 if (is_journal_aborted(journal))
473 return 1; 471 return 1;
474 472
475 /* OK, work out the oldest transaction remaining in the log, and 473 /*
474 * OK, work out the oldest transaction remaining in the log, and
476 * the log block it starts at. 475 * the log block it starts at.
477 * 476 *
478 * If the log is now empty, we need to work out which is the 477 * If the log is now empty, we need to work out which is the
479 * next transaction ID we will write, and where it will 478 * next transaction ID we will write, and where it will
480 * start. */ 479 * start.
481 480 */
482 spin_lock(&journal->j_state_lock); 481 spin_lock(&journal->j_state_lock);
483 spin_lock(&journal->j_list_lock); 482 spin_lock(&journal->j_list_lock);
484 transaction = journal->j_checkpoint_transactions; 483 transaction = journal->j_checkpoint_transactions;
@@ -504,7 +503,25 @@ int cleanup_journal_tail(journal_t *journal)
504 spin_unlock(&journal->j_state_lock); 503 spin_unlock(&journal->j_state_lock);
505 return 1; 504 return 1;
506 } 505 }
506 spin_unlock(&journal->j_state_lock);
507
508 /*
509 * We need to make sure that any blocks that were recently written out
510 * --- perhaps by log_do_checkpoint() --- are flushed out before we
511 * drop the transactions from the journal. It's unlikely this will be
512 * necessary, especially with an appropriately sized journal, but we
513 * need this to guarantee correctness. Fortunately
514 * cleanup_journal_tail() doesn't get called all that often.
515 */
516 if (journal->j_flags & JFS_BARRIER)
517 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
507 518
519 spin_lock(&journal->j_state_lock);
520 if (!tid_gt(first_tid, journal->j_tail_sequence)) {
521 spin_unlock(&journal->j_state_lock);
522 /* Someone else cleaned up journal so return 0 */
523 return 0;
524 }
508 /* OK, update the superblock to recover the freed space. 525 /* OK, update the superblock to recover the freed space.
509 * Physical blocks come first: have we wrapped beyond the end of 526 * Physical blocks come first: have we wrapped beyond the end of
510 * the log? */ 527 * the log? */
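The hunk above drops j_state_lock across a potentially blocking cache flush and, after retaking it, revalidates the tail, since another task may have advanced j_tail_sequence in the meantime. A small pthreads sketch of that unlock / flush / relock / recheck shape (names illustrative; the real code compares tids with wraparound-safe tid_gt()):

#include <pthread.h>

struct journal_sketch {
	pthread_mutex_t state_lock;
	unsigned long tail_sequence;
};

/* Stand-in for blkdev_issue_flush(); may sleep, so no locks are held. */
static void issue_cache_flush(void) { }

/* Called with state_lock held; returns 0 if someone else already did the work. */
static int update_tail_sketch(struct journal_sketch *j, unsigned long first_tid)
{
	pthread_mutex_unlock(&j->state_lock);
	issue_cache_flush();
	pthread_mutex_lock(&j->state_lock);

	if (first_tid <= j->tail_sequence) {
		pthread_mutex_unlock(&j->state_lock);
		return 0;               /* tail already moved past us */
	}

	j->tail_sequence = first_tid;   /* ...then go on to update the superblock */
	pthread_mutex_unlock(&j->state_lock);
	return 1;
}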
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 5b43e96788e6..008bf062fd26 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -20,6 +20,7 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/jbd.h> 21#include <linux/jbd.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/blkdev.h>
23#endif 24#endif
24 25
25/* 26/*
@@ -263,6 +264,9 @@ int journal_recover(journal_t *journal)
263 err2 = sync_blockdev(journal->j_fs_dev); 264 err2 = sync_blockdev(journal->j_fs_dev);
264 if (!err) 265 if (!err)
265 err = err2; 266 err = err2;
267 /* Flush disk caches to get replayed data on the permanent storage */
268 if (journal->j_flags & JFS_BARRIER)
269 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
266 270
267 return err; 271 return err;
268} 272}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index d76ca6ae2b1b..121f77cfef76 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -77,6 +77,8 @@ static int show_stat(struct seq_file *p, void *v)
77 steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; 77 steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
78 guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; 78 guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
79 guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; 79 guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
80 sum += kstat_cpu_irqs_sum(i);
81 sum += arch_irq_stat_cpu(i);
80 82
81 for (j = 0; j < NR_SOFTIRQS; j++) { 83 for (j = 0; j < NR_SOFTIRQS; j++) {
82 unsigned int softirq_stat = kstat_softirqs_cpu(j, i); 84 unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e418c5abdb0e..7dcd2a250495 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
518 if (!page) 518 if (!page)
519 continue; 519 continue;
520 520
521 if (PageReserved(page))
522 continue;
523
521 /* Clear accessed and referenced bits. */ 524 /* Clear accessed and referenced bits. */
522 ptep_test_and_clear_young(vma, addr, pte); 525 ptep_test_and_clear_young(vma, addr, pte);
523 ClearPageReferenced(page); 526 ClearPageReferenced(page);
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 2bfd987f4853..6b009548d2e0 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -179,47 +179,33 @@ static const char *qnx4_checkroot(struct super_block *sb)
179 struct qnx4_inode_entry *rootdir; 179 struct qnx4_inode_entry *rootdir;
180 int rd, rl; 180 int rd, rl;
181 int i, j; 181 int i, j;
182 int found = 0;
183 182
184 if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') { 183 if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/')
185 return "no qnx4 filesystem (no root dir)."; 184 return "no qnx4 filesystem (no root dir).";
186 } else { 185 QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
187 QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id)); 186 rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
188 rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1; 187 rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
189 rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size); 188 for (j = 0; j < rl; j++) {
190 for (j = 0; j < rl; j++) { 189 bh = sb_bread(sb, rd + j); /* root dir, first block */
191 bh = sb_bread(sb, rd + j); /* root dir, first block */ 190 if (bh == NULL)
192 if (bh == NULL) { 191 return "unable to read root entry.";
193 return "unable to read root entry."; 192 rootdir = (struct qnx4_inode_entry *) bh->b_data;
194 } 193 for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) {
195 for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) { 194 QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
196 rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE); 195 if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0)
197 if (rootdir->di_fname != NULL) { 196 continue;
198 QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname)); 197 qnx4_sb(sb)->BitMap = kmemdup(rootdir,
199 if (!strcmp(rootdir->di_fname, 198 sizeof(struct qnx4_inode_entry),
200 QNX4_BMNAME)) { 199 GFP_KERNEL);
201 found = 1;
202 qnx4_sb(sb)->BitMap = kmemdup(rootdir,
203 sizeof(struct qnx4_inode_entry),
204 GFP_KERNEL);
205 if (!qnx4_sb(sb)->BitMap) {
206 brelse (bh);
207 return "not enough memory for bitmap inode";
208 }/* keep bitmap inode known */
209 break;
210 }
211 }
212 }
213 brelse(bh); 200 brelse(bh);
214 if (found != 0) { 201 if (!qnx4_sb(sb)->BitMap)
215 break; 202 return "not enough memory for bitmap inode";
216 } 203 /* keep bitmap inode known */
217 } 204 return NULL;
218 if (found == 0) {
219 return "bitmap file not found.";
220 } 205 }
206 brelse(bh);
221 } 207 }
222 return NULL; 208 return "bitmap file not found.";
223} 209}
224 210
225static int qnx4_fill_super(struct super_block *s, void *data, int silent) 211static int qnx4_fill_super(struct super_block *s, void *data, int silent)
@@ -270,7 +256,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
270 if (IS_ERR(root)) { 256 if (IS_ERR(root)) {
271 printk(KERN_ERR "qnx4: get inode failed\n"); 257 printk(KERN_ERR "qnx4: get inode failed\n");
272 ret = PTR_ERR(root); 258 ret = PTR_ERR(root);
273 goto out; 259 goto outb;
274 } 260 }
275 261
276 ret = -ENOMEM; 262 ret = -ENOMEM;
@@ -283,6 +269,8 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
283 269
284 outi: 270 outi:
285 iput(root); 271 iput(root);
272 outb:
273 kfree(qs->BitMap);
286 out: 274 out:
287 brelse(bh); 275 brelse(bh);
288 outnobh: 276 outnobh:
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 5ec59b20cf76..46741970371b 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2125,6 +2125,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2125 mutex_unlock(&dqopt->dqio_mutex); 2125 mutex_unlock(&dqopt->dqio_mutex);
2126 goto out_file_init; 2126 goto out_file_init;
2127 } 2127 }
2128 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2129 dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
2128 mutex_unlock(&dqopt->dqio_mutex); 2130 mutex_unlock(&dqopt->dqio_mutex);
2129 spin_lock(&dq_state_lock); 2131 spin_lock(&dq_state_lock);
2130 dqopt->flags |= dquot_state_flag(flags, type); 2132 dqopt->flags |= dquot_state_flag(flags, type);
@@ -2464,7 +2466,7 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2464 spin_lock(&dq_data_lock); 2466 spin_lock(&dq_data_lock);
2465 ii->dqi_bgrace = mi->dqi_bgrace; 2467 ii->dqi_bgrace = mi->dqi_bgrace;
2466 ii->dqi_igrace = mi->dqi_igrace; 2468 ii->dqi_igrace = mi->dqi_igrace;
2467 ii->dqi_flags = mi->dqi_flags & DQF_MASK; 2469 ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK;
2468 ii->dqi_valid = IIF_ALL; 2470 ii->dqi_valid = IIF_ALL;
2469 spin_unlock(&dq_data_lock); 2471 spin_unlock(&dq_data_lock);
2470 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 2472 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
@@ -2490,8 +2492,8 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2490 if (ii->dqi_valid & IIF_IGRACE) 2492 if (ii->dqi_valid & IIF_IGRACE)
2491 mi->dqi_igrace = ii->dqi_igrace; 2493 mi->dqi_igrace = ii->dqi_igrace;
2492 if (ii->dqi_valid & IIF_FLAGS) 2494 if (ii->dqi_valid & IIF_FLAGS)
2493 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | 2495 mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) |
2494 (ii->dqi_flags & DQF_MASK); 2496 (ii->dqi_flags & DQF_SETINFO_MASK);
2495 spin_unlock(&dq_data_lock); 2497 spin_unlock(&dq_data_lock);
2496 mark_info_dirty(sb, type); 2498 mark_info_dirty(sb, type);
2497 /* Force write to disk */ 2499 /* Force write to disk */
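The quota change above splits the old DQF_MASK into separate get and set masks so that userspace can observe DQF_SYS_FILE through Q_GETINFO but cannot set or clear it through Q_SETINFO. A tiny sketch of why the two masks differ; the bit positions are illustrative, not the real quota definitions.

#include <stdint.h>

#define DQF_ROOT_SQUASH_SK  (1u << 0)   /* user-settable flag */
#define DQF_SYS_FILE_SK     (1u << 16)  /* kernel-only: quota lives in a hidden system file */

#define DQF_SETINFO_MASK_SK (DQF_ROOT_SQUASH_SK)
#define DQF_GETINFO_MASK_SK (DQF_ROOT_SQUASH_SK | DQF_SYS_FILE_SK)

static uint32_t report_flags(uint32_t kernel_flags)
{
	return kernel_flags & DQF_GETINFO_MASK_SK;      /* shown to userspace */
}

static uint32_t apply_flags(uint32_t kernel_flags, uint32_t user_flags)
{
	return (kernel_flags & ~DQF_SETINFO_MASK_SK) |
	       (user_flags   &  DQF_SETINFO_MASK_SK);   /* kernel-only bits preserved */
}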
diff --git a/fs/super.c b/fs/super.c
index de41e1e46f09..6015c02296b7 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1186,6 +1186,8 @@ int freeze_super(struct super_block *sb)
1186 printk(KERN_ERR 1186 printk(KERN_ERR
1187 "VFS:Filesystem freeze failed\n"); 1187 "VFS:Filesystem freeze failed\n");
1188 sb->s_frozen = SB_UNFROZEN; 1188 sb->s_frozen = SB_UNFROZEN;
1189 smp_wmb();
1190 wake_up(&sb->s_wait_unfrozen);
1189 deactivate_locked_super(sb); 1191 deactivate_locked_super(sb);
1190 return ret; 1192 return ret;
1191 } 1193 }
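The freeze_super() fix above restores SB_UNFROZEN and wakes s_wait_unfrozen on the failure path before deactivating the superblock, so that tasks already sleeping on the freeze cannot hang. A userspace analog of that "reset state, then wake waiters" pattern with illustrative names:

#include <pthread.h>

struct sb_sketch {
	pthread_mutex_t lock;
	pthread_cond_t wait_unfrozen;
	int frozen;                     /* 0 stands in for SB_UNFROZEN */
};

static void freeze_failed_sketch(struct sb_sketch *sb)
{
	pthread_mutex_lock(&sb->lock);
	sb->frozen = 0;                                 /* back to "unfrozen" */
	pthread_cond_broadcast(&sb->wait_unfrozen);     /* wake_up(&s_wait_unfrozen) */
	pthread_mutex_unlock(&sb->lock);
	/* ...then drop the superblock reference as before */
}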
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index fc1575fd4596..5b5af0d30a97 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -58,6 +58,7 @@
58#define METHOD_NAME__PRT "_PRT" 58#define METHOD_NAME__PRT "_PRT"
59#define METHOD_NAME__CRS "_CRS" 59#define METHOD_NAME__CRS "_CRS"
60#define METHOD_NAME__PRS "_PRS" 60#define METHOD_NAME__PRS "_PRS"
61#define METHOD_NAME__AEI "_AEI"
61#define METHOD_NAME__PRW "_PRW" 62#define METHOD_NAME__PRW "_PRW"
62#define METHOD_NAME__SRS "_SRS" 63#define METHOD_NAME__SRS "_SRS"
63 64
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index 173972672175..451823cb8837 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -15,6 +15,7 @@ extern int pxm_to_node(int);
15extern int node_to_pxm(int); 15extern int node_to_pxm(int);
16extern void __acpi_map_pxm_to_node(int, int); 16extern void __acpi_map_pxm_to_node(int, int);
17extern int acpi_map_pxm_to_node(int); 17extern int acpi_map_pxm_to_node(int);
18extern unsigned char acpi_srat_revision;
18 19
19#endif /* CONFIG_ACPI_NUMA */ 20#endif /* CONFIG_ACPI_NUMA */
20#endif /* __ACP_NUMA_H */ 21#endif /* __ACP_NUMA_H */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 83062ed0ef2f..2fe8639b3ae7 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -238,13 +238,6 @@ acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
238/* 238/*
239 * Miscellaneous 239 * Miscellaneous
240 */ 240 */
241acpi_status
242acpi_os_validate_address(u8 space_id, acpi_physical_address address,
243 acpi_size length, char *name);
244acpi_status
245acpi_os_invalidate_address(u8 space_id, acpi_physical_address address,
246 acpi_size length);
247
248u64 acpi_os_get_timer(void); 241u64 acpi_os_get_timer(void);
249 242
250acpi_status acpi_os_signal(u32 function, void *info); 243acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 7762bc2d8404..a28da35ba45e 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
47 47
48/* Current ACPICA subsystem version in YYYYMMDD format */ 48/* Current ACPICA subsystem version in YYYYMMDD format */
49 49
50#define ACPI_CA_VERSION 0x20110623 50#define ACPI_CA_VERSION 0x20120111
51 51
52#include "actypes.h" 52#include "actypes.h"
53#include "actbl.h" 53#include "actbl.h"
@@ -74,6 +74,7 @@ extern u8 acpi_gbl_disable_auto_repair;
74extern u32 acpi_current_gpe_count; 74extern u32 acpi_current_gpe_count;
75extern struct acpi_table_fadt acpi_gbl_FADT; 75extern struct acpi_table_fadt acpi_gbl_FADT;
76extern u8 acpi_gbl_system_awake_and_running; 76extern u8 acpi_gbl_system_awake_and_running;
77extern u8 acpi_gbl_reduced_hardware; /* ACPI 5.0 */
77 78
78extern u32 acpi_rsdt_forced; 79extern u32 acpi_rsdt_forced;
79/* 80/*
@@ -111,6 +112,11 @@ acpi_status acpi_install_interface(acpi_string interface_name);
111 112
112acpi_status acpi_remove_interface(acpi_string interface_name); 113acpi_status acpi_remove_interface(acpi_string interface_name);
113 114
115u32
116acpi_check_address_range(acpi_adr_space_type space_id,
117 acpi_physical_address address,
118 acpi_size length, u8 warn);
119
114/* 120/*
115 * ACPI Memory management 121 * ACPI Memory management
116 */ 122 */
@@ -276,12 +282,23 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
276acpi_status acpi_install_interface_handler(acpi_interface_handler handler); 282acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
277 283
278/* 284/*
279 * Event interfaces 285 * Global Lock interfaces
280 */ 286 */
281acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle); 287acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);
282 288
283acpi_status acpi_release_global_lock(u32 handle); 289acpi_status acpi_release_global_lock(u32 handle);
284 290
291/*
292 * Interfaces to AML mutex objects
293 */
294acpi_status
295acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout);
296
297acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname);
298
299/*
300 * Fixed Event interfaces
301 */
285acpi_status acpi_enable_event(u32 event, u32 flags); 302acpi_status acpi_enable_event(u32 event, u32 flags);
286 303
287acpi_status acpi_disable_event(u32 event, u32 flags); 304acpi_status acpi_disable_event(u32 event, u32 flags);
@@ -291,7 +308,7 @@ acpi_status acpi_clear_event(u32 event);
291acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status); 308acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
292 309
293/* 310/*
294 * GPE Interfaces 311 * General Purpose Event (GPE) Interfaces
295 */ 312 */
296acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); 313acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
297 314
@@ -346,6 +363,10 @@ acpi_get_possible_resources(acpi_handle device, struct acpi_buffer *ret_buffer);
346#endif 363#endif
347 364
348acpi_status 365acpi_status
366acpi_get_event_resources(acpi_handle device_handle,
367 struct acpi_buffer *ret_buffer);
368
369acpi_status
349acpi_walk_resources(acpi_handle device, 370acpi_walk_resources(acpi_handle device,
350 char *name, 371 char *name,
351 acpi_walk_resource_callback user_function, void *context); 372 acpi_walk_resource_callback user_function, void *context);
@@ -360,6 +381,11 @@ acpi_status
360acpi_resource_to_address64(struct acpi_resource *resource, 381acpi_resource_to_address64(struct acpi_resource *resource,
361 struct acpi_resource_address64 *out); 382 struct acpi_resource_address64 *out);
362 383
384acpi_status
385acpi_buffer_to_resource(u8 *aml_buffer,
386 u16 aml_buffer_length,
387 struct acpi_resource **resource_ptr);
388
363/* 389/*
364 * Hardware (ACPI device) interfaces 390 * Hardware (ACPI device) interfaces
365 */ 391 */
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 0a66cc45dd6b..3506e39a66b1 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -61,11 +61,14 @@ typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (6
61#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02 61#define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02
62#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03 62#define ACPI_PREFETCHABLE_MEMORY (u8) 0x03
63 63
64/*! [Begin] no source code translation */
64/* 65/*
65 * IO Attributes 66 * IO Attributes
66 * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh. 67 * The ISA IO ranges are: n000-n0FFh, n400-n4FFh, n800-n8FFh, nC00-nCFFh.
67 * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh. 68 * The non-ISA IO ranges are: n100-n3FFh, n500-n7FFh, n900-nBFFh, nCD0-nFFFh.
68 */ 69 */
70/*! [End] no source code translation !*/
71
69#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 72#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01
70#define ACPI_ISA_ONLY_RANGES (u8) 0x02 73#define ACPI_ISA_ONLY_RANGES (u8) 0x02
71#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) 74#define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES)
@@ -81,16 +84,26 @@ typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (6
81#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ 84#define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */
82 85
83/* 86/*
84 * IRQ Attributes 87 * Interrupt attributes - used in multiple descriptors
85 */ 88 */
89
90/* Triggering */
91
86#define ACPI_LEVEL_SENSITIVE (u8) 0x00 92#define ACPI_LEVEL_SENSITIVE (u8) 0x00
87#define ACPI_EDGE_SENSITIVE (u8) 0x01 93#define ACPI_EDGE_SENSITIVE (u8) 0x01
88 94
95/* Polarity */
96
89#define ACPI_ACTIVE_HIGH (u8) 0x00 97#define ACPI_ACTIVE_HIGH (u8) 0x00
90#define ACPI_ACTIVE_LOW (u8) 0x01 98#define ACPI_ACTIVE_LOW (u8) 0x01
99#define ACPI_ACTIVE_BOTH (u8) 0x02
100
101/* Sharing */
91 102
92#define ACPI_EXCLUSIVE (u8) 0x00 103#define ACPI_EXCLUSIVE (u8) 0x00
93#define ACPI_SHARED (u8) 0x01 104#define ACPI_SHARED (u8) 0x01
105#define ACPI_EXCLUSIVE_AND_WAKE (u8) 0x02
106#define ACPI_SHARED_AND_WAKE (u8) 0x03
94 107
95/* 108/*
96 * DMA Attributes 109 * DMA Attributes
@@ -127,6 +140,8 @@ typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (6
127#define ACPI_POS_DECODE (u8) 0x00 140#define ACPI_POS_DECODE (u8) 0x00
128#define ACPI_SUB_DECODE (u8) 0x01 141#define ACPI_SUB_DECODE (u8) 0x01
129 142
143/* Producer/Consumer */
144
130#define ACPI_PRODUCER (u8) 0x00 145#define ACPI_PRODUCER (u8) 0x00
131#define ACPI_CONSUMER (u8) 0x01 146#define ACPI_CONSUMER (u8) 0x01
132 147
@@ -192,6 +207,21 @@ struct acpi_resource_fixed_io {
192 u8 address_length; 207 u8 address_length;
193}; 208};
194 209
210struct acpi_resource_fixed_dma {
211 u16 request_lines;
212 u16 channels;
213 u8 width;
214};
215
216/* Values for Width field above */
217
218#define ACPI_DMA_WIDTH8 0
219#define ACPI_DMA_WIDTH16 1
220#define ACPI_DMA_WIDTH32 2
221#define ACPI_DMA_WIDTH64 3
222#define ACPI_DMA_WIDTH128 4
223#define ACPI_DMA_WIDTH256 5
224
195struct acpi_resource_vendor { 225struct acpi_resource_vendor {
196 u16 byte_length; 226 u16 byte_length;
197 u8 byte_data[1]; 227 u8 byte_data[1];
@@ -329,6 +359,166 @@ struct acpi_resource_generic_register {
329 u64 address; 359 u64 address;
330}; 360};
331 361
362struct acpi_resource_gpio {
363 u8 revision_id;
364 u8 connection_type;
365 u8 producer_consumer; /* For values, see Producer/Consumer above */
366 u8 pin_config;
367 u8 sharable; /* For values, see Interrupt Attributes above */
368 u8 io_restriction;
369 u8 triggering; /* For values, see Interrupt Attributes above */
370 u8 polarity; /* For values, see Interrupt Attributes above */
371 u16 drive_strength;
372 u16 debounce_timeout;
373 u16 pin_table_length;
374 u16 vendor_length;
375 struct acpi_resource_source resource_source;
376 u16 *pin_table;
377 u8 *vendor_data;
378};
379
380/* Values for GPIO connection_type field above */
381
382#define ACPI_RESOURCE_GPIO_TYPE_INT 0
383#define ACPI_RESOURCE_GPIO_TYPE_IO 1
384
385/* Values for pin_config field above */
386
387#define ACPI_PIN_CONFIG_DEFAULT 0
388#define ACPI_PIN_CONFIG_PULLUP 1
389#define ACPI_PIN_CONFIG_PULLDOWN 2
390#define ACPI_PIN_CONFIG_NOPULL 3
391
392/* Values for io_restriction field above */
393
394#define ACPI_IO_RESTRICT_NONE 0
395#define ACPI_IO_RESTRICT_INPUT 1
396#define ACPI_IO_RESTRICT_OUTPUT 2
397#define ACPI_IO_RESTRICT_NONE_PRESERVE 3
398
399/* Common structure for I2C, SPI, and UART serial descriptors */
400
401#define ACPI_RESOURCE_SERIAL_COMMON \
402 u8 revision_id; \
403 u8 type; \
404 u8 producer_consumer; /* For values, see Producer/Consumer above */\
405 u8 slave_mode; \
406 u8 type_revision_id; \
407 u16 type_data_length; \
408 u16 vendor_length; \
409 struct acpi_resource_source resource_source; \
410 u8 *vendor_data;
411
412struct acpi_resource_common_serialbus {
413ACPI_RESOURCE_SERIAL_COMMON};
414
415/* Values for the Type field above */
416
417#define ACPI_RESOURCE_SERIAL_TYPE_I2C 1
418#define ACPI_RESOURCE_SERIAL_TYPE_SPI 2
419#define ACPI_RESOURCE_SERIAL_TYPE_UART 3
420
421/* Values for slave_mode field above */
422
423#define ACPI_CONTROLLER_INITIATED 0
424#define ACPI_DEVICE_INITIATED 1
425
426struct acpi_resource_i2c_serialbus {
427 ACPI_RESOURCE_SERIAL_COMMON u8 access_mode;
428 u16 slave_address;
429 u32 connection_speed;
430};
431
432/* Values for access_mode field above */
433
434#define ACPI_I2C_7BIT_MODE 0
435#define ACPI_I2C_10BIT_MODE 1
436
437struct acpi_resource_spi_serialbus {
438 ACPI_RESOURCE_SERIAL_COMMON u8 wire_mode;
439 u8 device_polarity;
440 u8 data_bit_length;
441 u8 clock_phase;
442 u8 clock_polarity;
443 u16 device_selection;
444 u32 connection_speed;
445};
446
447/* Values for wire_mode field above */
448
449#define ACPI_SPI_4WIRE_MODE 0
450#define ACPI_SPI_3WIRE_MODE 1
451
452/* Values for device_polarity field above */
453
454#define ACPI_SPI_ACTIVE_LOW 0
455#define ACPI_SPI_ACTIVE_HIGH 1
456
457/* Values for clock_phase field above */
458
459#define ACPI_SPI_FIRST_PHASE 0
460#define ACPI_SPI_SECOND_PHASE 1
461
462/* Values for clock_polarity field above */
463
464#define ACPI_SPI_START_LOW 0
465#define ACPI_SPI_START_HIGH 1
466
467struct acpi_resource_uart_serialbus {
468 ACPI_RESOURCE_SERIAL_COMMON u8 endian;
469 u8 data_bits;
470 u8 stop_bits;
471 u8 flow_control;
472 u8 parity;
473 u8 lines_enabled;
474 u16 rx_fifo_size;
475 u16 tx_fifo_size;
476 u32 default_baud_rate;
477};
478
479/* Values for Endian field above */
480
481#define ACPI_UART_LITTLE_ENDIAN 0
482#define ACPI_UART_BIG_ENDIAN 1
483
484/* Values for data_bits field above */
485
486#define ACPI_UART_5_DATA_BITS 0
487#define ACPI_UART_6_DATA_BITS 1
488#define ACPI_UART_7_DATA_BITS 2
489#define ACPI_UART_8_DATA_BITS 3
490#define ACPI_UART_9_DATA_BITS 4
491
492/* Values for stop_bits field above */
493
494#define ACPI_UART_NO_STOP_BITS 0
495#define ACPI_UART_1_STOP_BIT 1
496#define ACPI_UART_1P5_STOP_BITS 2
497#define ACPI_UART_2_STOP_BITS 3
498
499/* Values for flow_control field above */
500
501#define ACPI_UART_FLOW_CONTROL_NONE 0
502#define ACPI_UART_FLOW_CONTROL_HW 1
503#define ACPI_UART_FLOW_CONTROL_XON_XOFF 2
504
505/* Values for Parity field above */
506
507#define ACPI_UART_PARITY_NONE 0
508#define ACPI_UART_PARITY_EVEN 1
509#define ACPI_UART_PARITY_ODD 2
510#define ACPI_UART_PARITY_MARK 3
511#define ACPI_UART_PARITY_SPACE 4
512
513/* Values for lines_enabled bitfield above */
514
515#define ACPI_UART_CARRIER_DETECT (1<<2)
516#define ACPI_UART_RING_INDICATOR (1<<3)
517#define ACPI_UART_DATA_SET_READY (1<<4)
518#define ACPI_UART_DATA_TERMINAL_READY (1<<5)
519#define ACPI_UART_CLEAR_TO_SEND (1<<6)
520#define ACPI_UART_REQUEST_TO_SEND (1<<7)
521
332/* ACPI_RESOURCE_TYPEs */ 522/* ACPI_RESOURCE_TYPEs */
333 523
334#define ACPI_RESOURCE_TYPE_IRQ 0 524#define ACPI_RESOURCE_TYPE_IRQ 0
@@ -348,7 +538,10 @@ struct acpi_resource_generic_register {
348#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */ 538#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */
349#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15 539#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15
350#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16 540#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16
351#define ACPI_RESOURCE_TYPE_MAX 16 541#define ACPI_RESOURCE_TYPE_GPIO 17 /* ACPI 5.0 */
542#define ACPI_RESOURCE_TYPE_FIXED_DMA 18 /* ACPI 5.0 */
543#define ACPI_RESOURCE_TYPE_SERIAL_BUS 19 /* ACPI 5.0 */
544#define ACPI_RESOURCE_TYPE_MAX 19
352 545
353/* Master union for resource descriptors */ 546/* Master union for resource descriptors */
354 547
@@ -358,6 +551,7 @@ union acpi_resource_data {
358 struct acpi_resource_start_dependent start_dpf; 551 struct acpi_resource_start_dependent start_dpf;
359 struct acpi_resource_io io; 552 struct acpi_resource_io io;
360 struct acpi_resource_fixed_io fixed_io; 553 struct acpi_resource_fixed_io fixed_io;
554 struct acpi_resource_fixed_dma fixed_dma;
361 struct acpi_resource_vendor vendor; 555 struct acpi_resource_vendor vendor;
362 struct acpi_resource_vendor_typed vendor_typed; 556 struct acpi_resource_vendor_typed vendor_typed;
363 struct acpi_resource_end_tag end_tag; 557 struct acpi_resource_end_tag end_tag;
@@ -370,6 +564,11 @@ union acpi_resource_data {
370 struct acpi_resource_extended_address64 ext_address64; 564 struct acpi_resource_extended_address64 ext_address64;
371 struct acpi_resource_extended_irq extended_irq; 565 struct acpi_resource_extended_irq extended_irq;
372 struct acpi_resource_generic_register generic_reg; 566 struct acpi_resource_generic_register generic_reg;
567 struct acpi_resource_gpio gpio;
568 struct acpi_resource_i2c_serialbus i2c_serial_bus;
569 struct acpi_resource_spi_serialbus spi_serial_bus;
570 struct acpi_resource_uart_serialbus uart_serial_bus;
571 struct acpi_resource_common_serialbus common_serial_bus;
373 572
374 /* Common fields */ 573 /* Common fields */
375 574
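With the ACPI 5.0 additions above, a _CRS walk can now hand back GPIO, fixed-DMA and serial-bus descriptors alongside the classic types. A minimal sketch of dispatching on the new ACPI_RESOURCE_TYPE_* values from an acpi_walk_resources() callback, assuming the usual ACPICA callback signature; real consumers and error handling are omitted.

#include <acpi/acpi.h>

static acpi_status dump_res(struct acpi_resource *res, void *context)
{
	switch (res->type) {
	case ACPI_RESOURCE_TYPE_GPIO:
		/* res->data.gpio.pin_table[0..pin_table_length-1] */
		break;
	case ACPI_RESOURCE_TYPE_FIXED_DMA:
		/* res->data.fixed_dma.request_lines / .channels / .width */
		break;
	case ACPI_RESOURCE_TYPE_SERIAL_BUS:
		/* res->data.common_serial_bus.type selects the i2c/spi/uart view */
		break;
	default:
		break;
	}
	return AE_OK;
}

/* Caller side, with a device handle obtained elsewhere:
 *	acpi_walk_resources(handle, METHOD_NAME__CRS, dump_res, NULL);
 */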
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index f1380287ed4d..8e1b92f6f650 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -255,6 +255,8 @@ struct acpi_table_fadt {
255 struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ 255 struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
256 struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ 256 struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */
257 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ 257 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
258 struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register */
259 struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register */
258}; 260};
259 261
260/* Masks for FADT Boot Architecture Flags (boot_flags) */ 262/* Masks for FADT Boot Architecture Flags (boot_flags) */
@@ -264,6 +266,7 @@ struct acpi_table_fadt {
264#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */ 266#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */
265#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */ 267#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */
266#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */ 268#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */
269#define ACPI_FADT_NO_CMOS_RTC (1<<5) /* 05: [V5] No CMOS real-time clock present */
267 270
268#define FADT2_REVISION_ID 3 271#define FADT2_REVISION_ID 3
269 272
@@ -289,6 +292,8 @@ struct acpi_table_fadt {
289#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */ 292#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
290#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */ 293#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
291#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */ 294#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
295#define ACPI_FADT_HW_REDUCED (1<<20) /* 20: [V5] ACPI hardware is not implemented (ACPI 5.0) */
296#define ACPI_FADT_LOW_POWER_S0 (1<<21) /* 21: [V5] S0 power savings are equal or better than S3 (ACPI 5.0) */
292 297
293/* Values for preferred_profile (Preferred Power Management Profiles) */ 298/* Values for preferred_profile (Preferred Power Management Profiles) */
294 299
@@ -299,14 +304,16 @@ enum acpi_prefered_pm_profiles {
299 PM_WORKSTATION = 3, 304 PM_WORKSTATION = 3,
300 PM_ENTERPRISE_SERVER = 4, 305 PM_ENTERPRISE_SERVER = 4,
301 PM_SOHO_SERVER = 5, 306 PM_SOHO_SERVER = 5,
302 PM_APPLIANCE_PC = 6 307 PM_APPLIANCE_PC = 6,
308 PM_PERFORMANCE_SERVER = 7,
309 PM_TABLET = 8
303}; 310};
304 311
305/* Reset to default packing */ 312/* Reset to default packing */
306 313
307#pragma pack() 314#pragma pack()
308 315
309#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f) 316#define ACPI_FADT_OFFSET(f) (u16) ACPI_OFFSET (struct acpi_table_fadt, f)
310 317
311/* 318/*
312 * Internal table-related structures 319 * Internal table-related structures
@@ -342,6 +349,7 @@ struct acpi_table_desc {
342 349
343#include <acpi/actbl1.h> 350#include <acpi/actbl1.h>
344#include <acpi/actbl2.h> 351#include <acpi/actbl2.h>
352#include <acpi/actbl3.h>
345 353
346/* 354/*
347 * Sizes of the various flavors of FADT. We need to look closely 355 * Sizes of the various flavors of FADT. We need to look closely
@@ -351,12 +359,15 @@ struct acpi_table_desc {
351 * FADT is the bottom line as to what the version really is. 359 * FADT is the bottom line as to what the version really is.
352 * 360 *
353 * For reference, the values below are as follows: 361 * For reference, the values below are as follows:
354 * FADT V1 size: 0x74 362 * FADT V1 size: 0x074
355 * FADT V2 size: 0x84 363 * FADT V2 size: 0x084
356 * FADT V3+ size: 0xF4 364 * FADT V3 size: 0x0F4
365 * FADT V4 size: 0x0F4
366 * FADT V5 size: 0x10C
357 */ 367 */
358#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) 368#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
359#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (reserved4[0]) + 3) 369#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (reserved4[0]) + 3)
360#define ACPI_FADT_V3_SIZE (u32) (sizeof (struct acpi_table_fadt)) 370#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
371#define ACPI_FADT_V5_SIZE (u32) (sizeof (struct acpi_table_fadt))
361 372
362#endif /* __ACTBL_H__ */ 373#endif /* __ACTBL_H__ */
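
With the V5 sleep_control/sleep_status registers appended, the FADT length now distinguishes an ACPI 5.0 table from the V3/V4 layout, which is why ACPI_FADT_V3_SIZE switches from sizeof() to the sleep_control offset. A minimal sketch of classifying a FADT by length, assuming only the structure and size macros defined in this header (classify_fadt() itself is a hypothetical helper, not part of ACPICA):

static int classify_fadt(const struct acpi_table_fadt *fadt)
{
	u32 length = fadt->header.length;

	if (length >= ACPI_FADT_V5_SIZE)
		return 5;	/* includes sleep_control/sleep_status */
	if (length >= ACPI_FADT_V3_SIZE)
		return 3;	/* V3/V4 layout, no V5 registers */
	if (length >= ACPI_FADT_V2_SIZE)
		return 2;
	return 1;		/* legacy 0x074-byte FADT */
}
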
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7504bc99b29b..71e747beac8f 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -228,7 +228,8 @@ enum acpi_einj_actions {
228 ACPI_EINJ_EXECUTE_OPERATION = 5, 228 ACPI_EINJ_EXECUTE_OPERATION = 5,
229 ACPI_EINJ_CHECK_BUSY_STATUS = 6, 229 ACPI_EINJ_CHECK_BUSY_STATUS = 6,
230 ACPI_EINJ_GET_COMMAND_STATUS = 7, 230 ACPI_EINJ_GET_COMMAND_STATUS = 7,
231 ACPI_EINJ_ACTION_RESERVED = 8, /* 8 and greater are reserved */ 231 ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
232 ACPI_EINJ_ACTION_RESERVED = 9, /* 9 and greater are reserved */
232 ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */ 233 ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */
233}; 234};
234 235
@@ -240,7 +241,27 @@ enum acpi_einj_instructions {
240 ACPI_EINJ_WRITE_REGISTER = 2, 241 ACPI_EINJ_WRITE_REGISTER = 2,
241 ACPI_EINJ_WRITE_REGISTER_VALUE = 3, 242 ACPI_EINJ_WRITE_REGISTER_VALUE = 3,
242 ACPI_EINJ_NOOP = 4, 243 ACPI_EINJ_NOOP = 4,
243 ACPI_EINJ_INSTRUCTION_RESERVED = 5 /* 5 and greater are reserved */ 244 ACPI_EINJ_FLUSH_CACHELINE = 5,
245 ACPI_EINJ_INSTRUCTION_RESERVED = 6 /* 6 and greater are reserved */
246};
247
248struct acpi_einj_error_type_with_addr {
249 u32 error_type;
250 u32 vendor_struct_offset;
251 u32 flags;
252 u32 apic_id;
253 u64 address;
254 u64 range;
255 u32 pcie_id;
256};
257
258struct acpi_einj_vendor {
259 u32 length;
260 u32 pcie_id;
261 u16 vendor_id;
262 u16 device_id;
263 u8 revision_id;
264 u8 reserved[3];
244}; 265};
245 266
246/* EINJ Trigger Error Action Table */ 267/* EINJ Trigger Error Action Table */
@@ -275,6 +296,7 @@ enum acpi_einj_command_status {
275#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9) 296#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9)
276#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10) 297#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10)
277#define ACPI_EINJ_PLATFORM_FATAL (1<<11) 298#define ACPI_EINJ_PLATFORM_FATAL (1<<11)
299#define ACPI_EINJ_VENDOR_DEFINED (1<<31)
278 300
279/******************************************************************************* 301/*******************************************************************************
280 * 302 *
@@ -631,7 +653,9 @@ enum acpi_madt_type {
631 ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8, 653 ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
632 ACPI_MADT_TYPE_LOCAL_X2APIC = 9, 654 ACPI_MADT_TYPE_LOCAL_X2APIC = 9,
633 ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10, 655 ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10,
634 ACPI_MADT_TYPE_RESERVED = 11 /* 11 and greater are reserved */ 656 ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11,
657 ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
658 ACPI_MADT_TYPE_RESERVED = 13 /* 13 and greater are reserved */
635}; 659};
636 660
637/* 661/*
@@ -752,11 +776,36 @@ struct acpi_madt_local_x2apic_nmi {
752 u8 reserved[3]; 776 u8 reserved[3];
753}; 777};
754 778
779/* 11: Generic Interrupt (ACPI 5.0) */
780
781struct acpi_madt_generic_interrupt {
782 struct acpi_subtable_header header;
783 u16 reserved; /* Reserved - must be zero */
784 u32 gic_id;
785 u32 uid;
786 u32 flags;
787 u32 parking_version;
788 u32 performance_interrupt;
789 u64 parked_address;
790 u64 base_address;
791};
792
793/* 12: Generic Distributor (ACPI 5.0) */
794
795struct acpi_madt_generic_distributor {
796 struct acpi_subtable_header header;
797 u16 reserved; /* Reserved - must be zero */
798 u32 gic_id;
799 u64 base_address;
800 u32 global_irq_base;
801 u32 reserved2; /* Reserved - must be zero */
802};
803
755/* 804/*
756 * Common flags fields for MADT subtables 805 * Common flags fields for MADT subtables
757 */ 806 */
758 807
759/* MADT Local APIC flags (lapic_flags) */ 808/* MADT Local APIC flags (lapic_flags) and GIC flags */
760 809
761#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */ 810#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
762 811
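
The two new MADT subtable types above describe ARM GIC CPU interfaces and distributors. A rough sketch of walking the MADT for them, assuming a mapped struct acpi_table_madt; the walk loop is illustrative, and only the structures and type constants come from this header:

static void walk_madt_gic(struct acpi_table_madt *madt)
{
	u8 *entry = (u8 *)madt + sizeof(*madt);
	u8 *end = (u8 *)madt + madt->header.length;

	while (entry + sizeof(struct acpi_subtable_header) <= end) {
		struct acpi_subtable_header *hdr = (void *)entry;

		if (hdr->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
			struct acpi_madt_generic_interrupt *gic = (void *)hdr;
			/* gic->gic_id / gic->base_address: one CPU interface */
		} else if (hdr->type == ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR) {
			struct acpi_madt_generic_distributor *dist = (void *)hdr;
			/* dist->base_address / dist->global_irq_base */
		}
		if (!hdr->length)
			break;		/* malformed entry; avoid spinning */
		entry += hdr->length;
	}
}
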
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
new file mode 100644
index 000000000000..c22ce80e9535
--- /dev/null
+++ b/include/acpi/actbl3.h
@@ -0,0 +1,552 @@
1/******************************************************************************
2 *
3 * Name: actbl3.h - ACPI Table Definitions
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACTBL3_H__
45#define __ACTBL3_H__
46
47/*******************************************************************************
48 *
49 * Additional ACPI Tables (3)
50 *
51 * These tables are not consumed directly by the ACPICA subsystem, but are
52 * included here to support device drivers and the AML disassembler.
53 *
54 * The tables in this file are fully defined within the ACPI specification.
55 *
56 ******************************************************************************/
57
58/*
59 * Values for description table header signatures for tables defined in this
60 * file. Useful because they make it more difficult to inadvertently type in
61 * the wrong signature.
62 */
63#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */
64#define ACPI_SIG_DRTM "DRTM" /* Dynamic Root of Trust for Measurement table */
65#define ACPI_SIG_FPDT "FPDT" /* Firmware Performance Data Table */
66#define ACPI_SIG_GTDT "GTDT" /* Generic Timer Description Table */
67#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
68#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
69#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
70#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
71
72#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
73#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
74
75/* Reserved table signatures */
76
77#define ACPI_SIG_CSRT "CSRT" /* Core System Resources Table */
78#define ACPI_SIG_DBG2 "DBG2" /* Debug Port table 2 */
79#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
80#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
81#define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */
82
83/*
84 * All tables must be byte-packed to match the ACPI specification, since
85 * the tables are provided by the system BIOS.
86 */
87#pragma pack(1)
88
89/*
90 * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
91 * This is the only type that is even remotely portable. Anything else is not
92 * portable, so do not use any other bitfield types.
93 */
94
95/*******************************************************************************
96 *
97 * BGRT - Boot Graphics Resource Table (ACPI 5.0)
98 * Version 1
99 *
100 ******************************************************************************/
101
102struct acpi_table_bgrt {
103 struct acpi_table_header header; /* Common ACPI table header */
104 u16 version;
105 u8 status;
106 u8 image_type;
107 u64 image_address;
108 u32 image_offset_x;
109 u32 image_offset_y;
110};
111
112/*******************************************************************************
113 *
114 * DRTM - Dynamic Root of Trust for Measurement table
115 *
116 ******************************************************************************/
117
118struct acpi_table_drtm {
119 struct acpi_table_header header; /* Common ACPI table header */
120 u64 entry_base_address;
121 u64 entry_length;
122 u32 entry_address32;
123 u64 entry_address64;
124 u64 exit_address;
125 u64 log_area_address;
126 u32 log_area_length;
127 u64 arch_dependent_address;
128 u32 flags;
129};
130
131/* 1) Validated Tables List */
132
133struct acpi_drtm_vtl_list {
134 u32 validated_table_list_count;
135};
136
137/* 2) Resources List */
138
139struct acpi_drtm_resource_list {
140 u32 resource_list_count;
141};
142
143/* 3) Platform-specific Identifiers List */
144
145struct acpi_drtm_id_list {
146 u32 id_list_count;
147};
148
149/*******************************************************************************
150 *
151 * FPDT - Firmware Performance Data Table (ACPI 5.0)
152 * Version 1
153 *
154 ******************************************************************************/
155
156struct acpi_table_fpdt {
157 struct acpi_table_header header; /* Common ACPI table header */
158};
159
160/* FPDT subtable header */
161
162struct acpi_fpdt_header {
163 u16 type;
164 u8 length;
165 u8 revision;
166};
167
168/* Values for Type field above */
169
170enum acpi_fpdt_type {
171 ACPI_FPDT_TYPE_BOOT = 0,
172 ACPI_FPDT_TYPE_S3PERF = 1,
173};
174
175/*
176 * FPDT subtables
177 */
178
179/* 0: Firmware Basic Boot Performance Record */
180
181struct acpi_fpdt_boot {
182 struct acpi_fpdt_header header;
183 u8 reserved[4];
184 u64 reset_end;
185 u64 load_start;
186 u64 startup_start;
187 u64 exit_services_entry;
188 u64 exit_services_exit;
189};
190
191/* 1: S3 Performance Table Pointer Record */
192
193struct acpi_fpdt_s3pt_ptr {
194 struct acpi_fpdt_header header;
195 u8 reserved[4];
196 u64 address;
197};
198
199/*
200 * S3PT - S3 Performance Table. This table is pointed to by the
201 * FPDT S3 Pointer Record above.
202 */
203struct acpi_table_s3pt {
204 u8 signature[4]; /* "S3PT" */
205 u32 length;
206};
207
208/*
209 * S3PT Subtables
210 */
211struct acpi_s3pt_header {
212 u16 type;
213 u8 length;
214 u8 revision;
215};
216
217/* Values for Type field above */
218
219enum acpi_s3pt_type {
220 ACPI_S3PT_TYPE_RESUME = 0,
221 ACPI_S3PT_TYPE_SUSPEND = 1,
222};
223
224struct acpi_s3pt_resume {
225 struct acpi_s3pt_header header;
226 u32 resume_count;
227 u64 full_resume;
228 u64 average_resume;
229};
230
231struct acpi_s3pt_suspend {
232 struct acpi_s3pt_header header;
233 u64 suspend_start;
234 u64 suspend_end;
235};
236
237/*******************************************************************************
238 *
239 * GTDT - Generic Timer Description Table (ACPI 5.0)
240 * Version 1
241 *
242 ******************************************************************************/
243
244struct acpi_table_gtdt {
245 struct acpi_table_header header; /* Common ACPI table header */
246 u64 address;
247 u32 flags;
248 u32 secure_pl1_interrupt;
249 u32 secure_pl1_flags;
250 u32 non_secure_pl1_interrupt;
251 u32 non_secure_pl1_flags;
252 u32 virtual_timer_interrupt;
253 u32 virtual_timer_flags;
254 u32 non_secure_pl2_interrupt;
255 u32 non_secure_pl2_flags;
256};
257
258/* Values for Flags field above */
259
260#define ACPI_GTDT_MAPPED_BLOCK_PRESENT 1
261
262/* Values for all "TimerFlags" fields above */
263
264#define ACPI_GTDT_INTERRUPT_MODE 1
265#define ACPI_GTDT_INTERRUPT_POLARITY 2
266
267/*******************************************************************************
268 *
269 * MPST - Memory Power State Table (ACPI 5.0)
270 * Version 1
271 *
272 ******************************************************************************/
273
274#define ACPI_MPST_CHANNEL_INFO \
275 u16 reserved1; \
276 u8 channel_id; \
277 u8 reserved2; \
278 u16 power_node_count;
279
280/* Main table */
281
282struct acpi_table_mpst {
283 struct acpi_table_header header; /* Common ACPI table header */
284 ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */
285};
286
287/* Memory Platform Communication Channel Info */
288
289struct acpi_mpst_channel {
290 ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */
291};
292
293/* Memory Power Node Structure */
294
295struct acpi_mpst_power_node {
296 u8 flags;
297 u8 reserved1;
298 u16 node_id;
299 u32 length;
300 u64 range_address;
301 u64 range_length;
302 u8 num_power_states;
303 u8 num_physical_components;
304 u16 reserved2;
305};
306
307/* Values for Flags field above */
308
309#define ACPI_MPST_ENABLED 1
310#define ACPI_MPST_POWER_MANAGED 2
311#define ACPI_MPST_HOT_PLUG_CAPABLE 4
312
313/* Memory Power State Structure (follows POWER_NODE above) */
314
315struct acpi_mpst_power_state {
316 u8 power_state;
317 u8 info_index;
318};
319
320/* Physical Component ID Structure (follows POWER_STATE above) */
321
322struct acpi_mpst_component {
323 u16 component_id;
324};
325
326/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */
327
328struct acpi_mpst_data_hdr {
329 u16 characteristics_count;
330};
331
332struct acpi_mpst_power_data {
333 u8 revision;
334 u8 flags;
335 u16 reserved1;
336 u32 average_power;
337 u32 power_saving;
338 u64 exit_latency;
339 u64 reserved2;
340};
341
342/* Values for Flags field above */
343
344#define ACPI_MPST_PRESERVE 1
345#define ACPI_MPST_AUTOENTRY 2
346#define ACPI_MPST_AUTOEXIT 4
347
348/* Shared Memory Region (not part of an ACPI table) */
349
350struct acpi_mpst_shared {
351 u32 signature;
352 u16 pcc_command;
353 u16 pcc_status;
354 u16 command_register;
355 u16 status_register;
356 u16 power_state_id;
357 u16 power_node_id;
358 u64 energy_consumed;
359 u64 average_power;
360};
361
362/*******************************************************************************
363 *
364 * PCCT - Platform Communications Channel Table (ACPI 5.0)
365 * Version 1
366 *
367 ******************************************************************************/
368
369struct acpi_table_pcct {
370 struct acpi_table_header header; /* Common ACPI table header */
371 u32 flags;
372 u32 latency;
373 u32 reserved;
374};
375
376/* Values for Flags field above */
377
378#define ACPI_PCCT_DOORBELL 1
379
380/*
381 * PCCT subtables
382 */
383
384/* 0: Generic Communications Subspace */
385
386struct acpi_pcct_subspace {
387 struct acpi_subtable_header header;
388 u8 reserved[6];
389 u64 base_address;
390 u64 length;
391 struct acpi_generic_address doorbell_register;
392 u64 preserve_mask;
393 u64 write_mask;
394};
395
396/*
397 * PCC memory structures (not part of the ACPI table)
398 */
399
400/* Shared Memory Region */
401
402struct acpi_pcct_shared_memory {
403 u32 signature;
404 u16 command;
405 u16 status;
406};
407
408/*******************************************************************************
409 *
410 * PMTT - Platform Memory Topology Table (ACPI 5.0)
411 * Version 1
412 *
413 ******************************************************************************/
414
415struct acpi_table_pmtt {
416 struct acpi_table_header header; /* Common ACPI table header */
417 u32 reserved;
418};
419
420/* Common header for PMTT subtables that follow main table */
421
422struct acpi_pmtt_header {
423 u8 type;
424 u8 reserved1;
425 u16 length;
426 u16 flags;
427 u16 reserved2;
428};
429
430/* Values for Type field above */
431
432#define ACPI_PMTT_TYPE_SOCKET 0
433#define ACPI_PMTT_TYPE_CONTROLLER 1
434#define ACPI_PMTT_TYPE_DIMM 2
435#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */
436
437/* Values for Flags field above */
438
439#define ACPI_PMTT_TOP_LEVEL 0x0001
440#define ACPI_PMTT_PHYSICAL 0x0002
441#define ACPI_PMTT_MEMORY_TYPE 0x000C
442
443/*
444 * PMTT subtables, correspond to Type in struct acpi_pmtt_header
445 */
446
447/* 0: Socket Structure */
448
449struct acpi_pmtt_socket {
450 struct acpi_pmtt_header header;
451 u16 socket_id;
452 u16 reserved;
453};
454
455/* 1: Memory Controller subtable */
456
457struct acpi_pmtt_controller {
458 struct acpi_pmtt_header header;
459 u32 read_latency;
460 u32 write_latency;
461 u32 read_bandwidth;
462 u32 write_bandwidth;
463 u16 access_width;
464 u16 alignment;
465 u16 reserved;
466 u16 domain_count;
467};
468
469/* 1a: Proximity Domain substructure */
470
471struct acpi_pmtt_domain {
472 u32 proximity_domain;
473};
474
475/* 2: Physical Component Identifier (DIMM) */
476
477struct acpi_pmtt_physical_component {
478 struct acpi_pmtt_header header;
479 u16 component_id;
480 u16 reserved;
481 u32 memory_size;
482 u32 bios_handle;
483};
484
485/*******************************************************************************
486 *
487 * RASF - RAS Feature Table (ACPI 5.0)
488 * Version 1
489 *
490 ******************************************************************************/
491
492struct acpi_table_rasf {
493 struct acpi_table_header header; /* Common ACPI table header */
494 u8 channel_id[12];
495};
496
497/* RASF Platform Communication Channel Shared Memory Region */
498
499struct acpi_rasf_shared_memory {
500 u32 signature;
501 u16 command;
502 u16 status;
503 u64 requested_address;
504 u64 requested_length;
505 u64 actual_address;
506 u64 actual_length;
507 u16 flags;
508 u8 speed;
509};
510
511/* Masks for Flags and Speed fields above */
512
513#define ACPI_RASF_SCRUBBER_RUNNING 1
514#define ACPI_RASF_SPEED (7<<1)
515
516/* Channel Commands */
517
518enum acpi_rasf_commands {
519 ACPI_RASF_GET_RAS_CAPABILITIES = 1,
520 ACPI_RASF_GET_PATROL_PARAMETERS = 2,
521 ACPI_RASF_START_PATROL_SCRUBBER = 3,
522 ACPI_RASF_STOP_PATROL_SCRUBBER = 4
523};
524
525/* Channel Command flags */
526
527#define ACPI_RASF_GENERATE_SCI (1<<15)
528
529/* Status values */
530
531enum acpi_rasf_status {
532 ACPI_RASF_SUCCESS = 0,
533 ACPI_RASF_NOT_VALID = 1,
534 ACPI_RASF_NOT_SUPPORTED = 2,
535 ACPI_RASF_BUSY = 3,
536 ACPI_RASF_FAILED = 4,
537 ACPI_RASF_ABORTED = 5,
538 ACPI_RASF_INVALID_DATA = 6
539};
540
541/* Status flags */
542
543#define ACPI_RASF_COMMAND_COMPLETE (1)
544#define ACPI_RASF_SCI_DOORBELL (1<<1)
545#define ACPI_RASF_ERROR (1<<2)
546#define ACPI_RASF_STATUS (0x1F<<3)
547
548/* Reset to default packing */
549
550#pragma pack()
551
552#endif /* __ACTBL3_H__ */
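
The FPDT's basic boot record is the piece firmware is expected to populate with boot milestones. A small, illustrative dump helper, assuming a pointer to a mapped struct acpi_fpdt_boot (the counters are printed as raw values; their unit is defined by the ACPI 5.0 spec and not asserted here):

static void dump_fpdt_boot(const struct acpi_fpdt_boot *boot)
{
	pr_info("FPDT: reset_end=%llu load_start=%llu startup_start=%llu\n",
		(unsigned long long)boot->reset_end,
		(unsigned long long)boot->load_start,
		(unsigned long long)boot->startup_start);
	pr_info("FPDT: exit_boot_services entry=%llu exit=%llu\n",
		(unsigned long long)boot->exit_services_entry,
		(unsigned long long)boot->exit_services_exit);
}
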
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index ed73f6705c86..d5dee7ce9474 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -712,8 +712,10 @@ typedef u8 acpi_adr_space_type;
712#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 712#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5
713#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 713#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6
714#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 714#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7
715#define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8
716#define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9
715 717
716#define ACPI_NUM_PREDEFINED_REGIONS 8 718#define ACPI_NUM_PREDEFINED_REGIONS 10
717 719
718/* 720/*
719 * Special Address Spaces 721 * Special Address Spaces
@@ -957,6 +959,14 @@ acpi_status(*acpi_adr_space_handler) (u32 function,
957 959
958#define ACPI_DEFAULT_HANDLER NULL 960#define ACPI_DEFAULT_HANDLER NULL
959 961
962/* Special Context data for generic_serial_bus/general_purpose_io (ACPI 5.0) */
963
964struct acpi_connection_info {
965 u8 *connection;
966 u16 length;
967 u8 access_length;
968};
969
960typedef 970typedef
961acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle, 971acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
962 u32 function, 972 u32 function,
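
For the new GSBUS and GPIO address spaces, struct acpi_connection_info is the context a region handler can expect for ACPI 5.0 serial-bus operation regions. A skeletal handler matching the acpi_adr_space_handler typedef earlier in this header; treating handler_context as a connection_info is an assumption for illustration, and the handler name and body are placeholders:

static acpi_status
example_gsbus_handler(u32 function, acpi_physical_address address,
		      u32 bit_width, u64 *value,
		      void *handler_context, void *region_context)
{
	struct acpi_connection_info *info = handler_context;

	if (!info || !info->connection)
		return AE_BAD_PARAMETER;

	/* Decode 'function' (read/write plus transfer length) and drive the
	 * underlying I2C/SPI/UART controller here. */
	return AE_OK;
}
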
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index c37c34275a44..bc9ec1d7698c 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -17,7 +17,7 @@
17 17
18/*****************************************************************************/ 18/*****************************************************************************/
19/* 19/*
20 * the payload for a key of type "user" 20 * the payload for a key of type "user" or "logon"
21 * - once filled in and attached to a key: 21 * - once filled in and attached to a key:
22 * - the payload struct is invariant may not be changed, only replaced 22 * - the payload struct is invariant may not be changed, only replaced
23 * - the payload must be read with RCU procedures or with the key semaphore 23 * - the payload must be read with RCU procedures or with the key semaphore
@@ -33,6 +33,7 @@ struct user_key_payload {
33}; 33};
34 34
35extern struct key_type key_type_user; 35extern struct key_type key_type_user;
36extern struct key_type key_type_logon;
36 37
37extern int user_instantiate(struct key *key, const void *data, size_t datalen); 38extern int user_instantiate(struct key *key, const void *data, size_t datalen);
38extern int user_update(struct key *key, const void *data, size_t datalen); 39extern int user_update(struct key *key, const void *data, size_t datalen);
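
The new "logon" key type shares the user-key payload but is meant to be writable, not readable, from userspace. A hedged userspace sketch using add_key(2) via libkeyutils; the description string and keyring choice are arbitrary examples, and "logon" descriptions conventionally take a "service:identifier" form:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	key_serial_t key = add_key("logon", "example:user1",
				   "secret-blob", 11,
				   KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	/* Reading the payload back (keyctl read) is expected to fail for
	 * "logon" keys; only in-kernel users consume the secret. */
	printf("logon key id: %d\n", key);
	return 0;
}
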
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 627a3a42e4d8..3f968665899b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -310,6 +310,11 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
310 u32 *mask, u32 req); 310 u32 *mask, u32 req);
311extern void acpi_early_init(void); 311extern void acpi_early_init(void);
312 312
313extern int acpi_nvs_register(__u64 start, __u64 size);
314
315extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
316 void *data);
317
313#else /* !CONFIG_ACPI */ 318#else /* !CONFIG_ACPI */
314 319
315#define acpi_disabled 1 320#define acpi_disabled 1
@@ -352,15 +357,18 @@ static inline int acpi_table_parse(char *id,
352{ 357{
353 return -1; 358 return -1;
354} 359}
355#endif /* !CONFIG_ACPI */
356 360
357#ifdef CONFIG_ACPI_SLEEP 361static inline int acpi_nvs_register(__u64 start, __u64 size)
358int suspend_nvs_register(unsigned long start, unsigned long size);
359#else
360static inline int suspend_nvs_register(unsigned long a, unsigned long b)
361{ 362{
362 return 0; 363 return 0;
363} 364}
364#endif 365
366static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
367 void *data)
368{
369 return 0;
370}
371
372#endif /* !CONFIG_ACPI */
365 373
366#endif /*_LINUX_ACPI_H*/ 374#endif /*_LINUX_ACPI_H*/
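
The !CONFIG_ACPI stubs above let NVS users call the iterator unconditionally. A tiny usage sketch, assuming a callback that sums region sizes (that a non-zero return stops the walk is an assumption about the iterator's contract):

static int count_nvs_bytes(__u64 start, __u64 size, void *data)
{
	*(__u64 *)data += size;
	return 0;
}

static __u64 total_nvs_bytes(void)
{
	__u64 total = 0;

	/* Compiles down to a no-op returning 0 when CONFIG_ACPI is not set. */
	acpi_nvs_for_each_region(count_nvs_bytes, &total);
	return total;
}
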
diff --git a/include/linux/acpi_io.h b/include/linux/acpi_io.h
index 4afd7102459d..b0ffa219993e 100644
--- a/include/linux/acpi_io.h
+++ b/include/linux/acpi_io.h
@@ -12,4 +12,7 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
12 12
13void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size); 13void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
14 14
15int acpi_os_map_generic_address(struct acpi_generic_address *addr);
16void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
17
15#endif 18#endif
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 23f81de51829..712abcc205ae 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -186,7 +186,14 @@ struct cpuidle_governor {
186extern int cpuidle_register_governor(struct cpuidle_governor *gov); 186extern int cpuidle_register_governor(struct cpuidle_governor *gov);
187extern void cpuidle_unregister_governor(struct cpuidle_governor *gov); 187extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
188 188
189#ifdef CONFIG_INTEL_IDLE
190extern int intel_idle_cpu_init(int cpu);
189#else 191#else
192static inline int intel_idle_cpu_init(int cpu) { return -1; }
193#endif
194
195#else
196static inline int intel_idle_cpu_init(int cpu) { return -1; }
190 197
191static inline int cpuidle_register_governor(struct cpuidle_governor *gov) 198static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
192{return 0;} 199{return 0;}
diff --git a/include/linux/device.h b/include/linux/device.h
index 5b3adb8f9588..b63fb393aa58 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -279,11 +279,11 @@ struct device *driver_find_device(struct device_driver *drv,
279 279
280/** 280/**
281 * struct subsys_interface - interfaces to device functions 281 * struct subsys_interface - interfaces to device functions
282 * @name name of the device function 282 * @name: name of the device function
283 * @subsystem subsytem of the devices to attach to 283 * @subsys: subsytem of the devices to attach to
284 * @node the list of functions registered at the subsystem 284 * @node: the list of functions registered at the subsystem
285 * @add device hookup to device function handler 285 * @add_dev: device hookup to device function handler
286 * @remove device hookup to device function handler 286 * @remove_dev: device hookup to device function handler
287 * 287 *
288 * Simple interfaces attached to a subsystem. Multiple interfaces can 288 * Simple interfaces attached to a subsystem. Multiple interfaces can
289 * attach to a subsystem and its devices. Unlike drivers, they do not 289 * attach to a subsystem and its devices. Unlike drivers, they do not
@@ -612,6 +612,7 @@ struct device_dma_parameters {
612 * @archdata: For arch-specific additions. 612 * @archdata: For arch-specific additions.
613 * @of_node: Associated device tree node. 613 * @of_node: Associated device tree node.
614 * @devt: For creating the sysfs "dev". 614 * @devt: For creating the sysfs "dev".
615 * @id: device instance
615 * @devres_lock: Spinlock to protect the resource of the device. 616 * @devres_lock: Spinlock to protect the resource of the device.
616 * @devres_head: The resources list of the device. 617 * @devres_head: The resources list of the device.
617 * @knode_class: The node used to add the device to the class list. 618 * @knode_class: The node used to add the device to the class list.
@@ -1003,6 +1004,10 @@ extern long sysfs_deprecated;
1003 * Each module may only use this macro once, and calling it replaces 1004 * Each module may only use this macro once, and calling it replaces
1004 * module_init() and module_exit(). 1005 * module_init() and module_exit().
1005 * 1006 *
1007 * @__driver: driver name
1008 * @__register: register function for this driver type
1009 * @__unregister: unregister function for this driver type
1010 *
1006 * Use this macro to construct bus specific macros for registering 1011 * Use this macro to construct bus specific macros for registering
1007 * drivers, and do not use it on its own. 1012 * drivers, and do not use it on its own.
1008 */ 1013 */
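
The newly documented module_driver() parameters show the pattern bus code uses to offer one-line driver registration. A hedged sketch of wrapping it for a hypothetical "foo" bus (every foo_* name is invented; only the macro itself is the one documented above):

/* In a hypothetical <linux/foo.h>: */
#define module_foo_driver(__foo_driver) \
	module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)

/* In a driver, this replaces the module_init()/module_exit() boilerplate: */
static struct foo_driver my_foo_driver = {
	.driver = { .name = "my-foo" },
};
module_foo_driver(my_foo_driver);
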
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0244082d42c5..386da09f229d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -396,6 +396,7 @@ struct inodes_stat_t {
396#include <linux/rculist_bl.h> 396#include <linux/rculist_bl.h>
397#include <linux/atomic.h> 397#include <linux/atomic.h>
398#include <linux/shrinker.h> 398#include <linux/shrinker.h>
399#include <linux/migrate_mode.h>
399 400
400#include <asm/byteorder.h> 401#include <asm/byteorder.h>
401 402
@@ -526,7 +527,6 @@ enum positive_aop_returns {
526struct page; 527struct page;
527struct address_space; 528struct address_space;
528struct writeback_control; 529struct writeback_control;
529enum migrate_mode;
530 530
531struct iov_iter { 531struct iov_iter {
532 const struct iovec *iov; 532 const struct iovec *iov;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 2fa0901219d4..0d7d6a1b172f 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -50,9 +50,11 @@
50 * note header. For kdump, the code in vmcore.c runs in the context 50 * note header. For kdump, the code in vmcore.c runs in the context
51 * of the second kernel to combine them into one note. 51 * of the second kernel to combine them into one note.
52 */ 52 */
53#ifndef KEXEC_NOTE_BYTES
53#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \ 54#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \
54 KEXEC_CORE_NOTE_NAME_BYTES + \ 55 KEXEC_CORE_NOTE_NAME_BYTES + \
55 KEXEC_CORE_NOTE_DESC_BYTES ) 56 KEXEC_CORE_NOTE_DESC_BYTES )
57#endif
56 58
57/* 59/*
58 * This structure is used to hold the arguments that are used when loading 60 * This structure is used to hold the arguments that are used when loading
diff --git a/include/linux/key.h b/include/linux/key.h
index bfc014c57351..5253471cd2ea 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -271,7 +271,7 @@ extern int keyring_add_key(struct key *keyring,
271 271
272extern struct key *key_lookup(key_serial_t id); 272extern struct key *key_lookup(key_serial_t id);
273 273
274static inline key_serial_t key_serial(struct key *key) 274static inline key_serial_t key_serial(const struct key *key)
275{ 275{
276 return key ? key->serial : 0; 276 return key ? key->serial : 0;
277} 277}
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index eaf867412f7a..05ed2828a553 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -3,22 +3,10 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/mempolicy.h> 5#include <linux/mempolicy.h>
6#include <linux/migrate_mode.h>
6 7
7typedef struct page *new_page_t(struct page *, unsigned long private, int **); 8typedef struct page *new_page_t(struct page *, unsigned long private, int **);
8 9
9/*
10 * MIGRATE_ASYNC means never block
11 * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
12 * on most operations but not ->writepage as the potential stall time
13 * is too significant
14 * MIGRATE_SYNC will block when migrating pages
15 */
16enum migrate_mode {
17 MIGRATE_ASYNC,
18 MIGRATE_SYNC_LIGHT,
19 MIGRATE_SYNC,
20};
21
22#ifdef CONFIG_MIGRATION 10#ifdef CONFIG_MIGRATION
23#define PAGE_MIGRATION 1 11#define PAGE_MIGRATION 1
24 12
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
new file mode 100644
index 000000000000..ebf3d89a3919
--- /dev/null
+++ b/include/linux/migrate_mode.h
@@ -0,0 +1,16 @@
1#ifndef MIGRATE_MODE_H_INCLUDED
2#define MIGRATE_MODE_H_INCLUDED
3/*
4 * MIGRATE_ASYNC means never block
5 * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
6 * on most operations but not ->writepage as the potential stall time
7 * is too significant
8 * MIGRATE_SYNC will block when migrating pages
9 */
10enum migrate_mode {
11 MIGRATE_ASYNC,
12 MIGRATE_SYNC_LIGHT,
13 MIGRATE_SYNC,
14};
15
16#endif /* MIGRATE_MODE_H_INCLUDED */
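
Splitting enum migrate_mode into this header lets fs.h use the type without pulling in all of migrate.h. A trivial sketch of a caller picking a mode (the policy helper is hypothetical; the enum values are the ones above):

#include <linux/types.h>
#include <linux/migrate_mode.h>

static enum migrate_mode pick_migrate_mode(bool may_block)
{
	/* Block lightly when the caller can sleep, never block otherwise. */
	return may_block ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
}
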
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
new file mode 100644
index 000000000000..9490a00529f4
--- /dev/null
+++ b/include/linux/nvme.h
@@ -0,0 +1,434 @@
1/*
2 * Definitions for the NVM Express interface
3 * Copyright (c) 2011, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#ifndef _LINUX_NVME_H
20#define _LINUX_NVME_H
21
22#include <linux/types.h>
23
24struct nvme_bar {
25 __u64 cap; /* Controller Capabilities */
26 __u32 vs; /* Version */
27 __u32 intms; /* Interrupt Mask Set */
28 __u32 intmc; /* Interrupt Mask Clear */
29 __u32 cc; /* Controller Configuration */
30 __u32 rsvd1; /* Reserved */
31 __u32 csts; /* Controller Status */
32 __u32 rsvd2; /* Reserved */
33 __u32 aqa; /* Admin Queue Attributes */
34 __u64 asq; /* Admin SQ Base Address */
35 __u64 acq; /* Admin CQ Base Address */
36};
37
38#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
39#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
40
41enum {
42 NVME_CC_ENABLE = 1 << 0,
43 NVME_CC_CSS_NVM = 0 << 4,
44 NVME_CC_MPS_SHIFT = 7,
45 NVME_CC_ARB_RR = 0 << 11,
46 NVME_CC_ARB_WRRU = 1 << 11,
47 NVME_CC_ARB_VS = 7 << 11,
48 NVME_CC_SHN_NONE = 0 << 14,
49 NVME_CC_SHN_NORMAL = 1 << 14,
50 NVME_CC_SHN_ABRUPT = 2 << 14,
51 NVME_CC_IOSQES = 6 << 16,
52 NVME_CC_IOCQES = 4 << 20,
53 NVME_CSTS_RDY = 1 << 0,
54 NVME_CSTS_CFS = 1 << 1,
55 NVME_CSTS_SHST_NORMAL = 0 << 2,
56 NVME_CSTS_SHST_OCCUR = 1 << 2,
57 NVME_CSTS_SHST_CMPLT = 2 << 2,
58};
59
60struct nvme_id_power_state {
61 __le16 max_power; /* centiwatts */
62 __u16 rsvd2;
63 __le32 entry_lat; /* microseconds */
64 __le32 exit_lat; /* microseconds */
65 __u8 read_tput;
66 __u8 read_lat;
67 __u8 write_tput;
68 __u8 write_lat;
69 __u8 rsvd16[16];
70};
71
72#define NVME_VS(major, minor) (major << 16 | minor)
73
74struct nvme_id_ctrl {
75 __le16 vid;
76 __le16 ssvid;
77 char sn[20];
78 char mn[40];
79 char fr[8];
80 __u8 rab;
81 __u8 ieee[3];
82 __u8 mic;
83 __u8 mdts;
84 __u8 rsvd78[178];
85 __le16 oacs;
86 __u8 acl;
87 __u8 aerl;
88 __u8 frmw;
89 __u8 lpa;
90 __u8 elpe;
91 __u8 npss;
92 __u8 rsvd264[248];
93 __u8 sqes;
94 __u8 cqes;
95 __u8 rsvd514[2];
96 __le32 nn;
97 __le16 oncs;
98 __le16 fuses;
99 __u8 fna;
100 __u8 vwc;
101 __le16 awun;
102 __le16 awupf;
103 __u8 rsvd530[1518];
104 struct nvme_id_power_state psd[32];
105 __u8 vs[1024];
106};
107
108struct nvme_lbaf {
109 __le16 ms;
110 __u8 ds;
111 __u8 rp;
112};
113
114struct nvme_id_ns {
115 __le64 nsze;
116 __le64 ncap;
117 __le64 nuse;
118 __u8 nsfeat;
119 __u8 nlbaf;
120 __u8 flbas;
121 __u8 mc;
122 __u8 dpc;
123 __u8 dps;
124 __u8 rsvd30[98];
125 struct nvme_lbaf lbaf[16];
126 __u8 rsvd192[192];
127 __u8 vs[3712];
128};
129
130enum {
131 NVME_NS_FEAT_THIN = 1 << 0,
132 NVME_LBAF_RP_BEST = 0,
133 NVME_LBAF_RP_BETTER = 1,
134 NVME_LBAF_RP_GOOD = 2,
135 NVME_LBAF_RP_DEGRADED = 3,
136};
137
138struct nvme_lba_range_type {
139 __u8 type;
140 __u8 attributes;
141 __u8 rsvd2[14];
142 __u64 slba;
143 __u64 nlb;
144 __u8 guid[16];
145 __u8 rsvd48[16];
146};
147
148enum {
149 NVME_LBART_TYPE_FS = 0x01,
150 NVME_LBART_TYPE_RAID = 0x02,
151 NVME_LBART_TYPE_CACHE = 0x03,
152 NVME_LBART_TYPE_SWAP = 0x04,
153
154 NVME_LBART_ATTRIB_TEMP = 1 << 0,
155 NVME_LBART_ATTRIB_HIDE = 1 << 1,
156};
157
158/* I/O commands */
159
160enum nvme_opcode {
161 nvme_cmd_flush = 0x00,
162 nvme_cmd_write = 0x01,
163 nvme_cmd_read = 0x02,
164 nvme_cmd_write_uncor = 0x04,
165 nvme_cmd_compare = 0x05,
166 nvme_cmd_dsm = 0x09,
167};
168
169struct nvme_common_command {
170 __u8 opcode;
171 __u8 flags;
172 __u16 command_id;
173 __le32 nsid;
174 __u32 cdw2[2];
175 __le64 metadata;
176 __le64 prp1;
177 __le64 prp2;
178 __u32 cdw10[6];
179};
180
181struct nvme_rw_command {
182 __u8 opcode;
183 __u8 flags;
184 __u16 command_id;
185 __le32 nsid;
186 __u64 rsvd2;
187 __le64 metadata;
188 __le64 prp1;
189 __le64 prp2;
190 __le64 slba;
191 __le16 length;
192 __le16 control;
193 __le32 dsmgmt;
194 __le32 reftag;
195 __le16 apptag;
196 __le16 appmask;
197};
198
199enum {
200 NVME_RW_LR = 1 << 15,
201 NVME_RW_FUA = 1 << 14,
202 NVME_RW_DSM_FREQ_UNSPEC = 0,
203 NVME_RW_DSM_FREQ_TYPICAL = 1,
204 NVME_RW_DSM_FREQ_RARE = 2,
205 NVME_RW_DSM_FREQ_READS = 3,
206 NVME_RW_DSM_FREQ_WRITES = 4,
207 NVME_RW_DSM_FREQ_RW = 5,
208 NVME_RW_DSM_FREQ_ONCE = 6,
209 NVME_RW_DSM_FREQ_PREFETCH = 7,
210 NVME_RW_DSM_FREQ_TEMP = 8,
211 NVME_RW_DSM_LATENCY_NONE = 0 << 4,
212 NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
213 NVME_RW_DSM_LATENCY_NORM = 2 << 4,
214 NVME_RW_DSM_LATENCY_LOW = 3 << 4,
215 NVME_RW_DSM_SEQ_REQ = 1 << 6,
216 NVME_RW_DSM_COMPRESSED = 1 << 7,
217};
218
219/* Admin commands */
220
221enum nvme_admin_opcode {
222 nvme_admin_delete_sq = 0x00,
223 nvme_admin_create_sq = 0x01,
224 nvme_admin_get_log_page = 0x02,
225 nvme_admin_delete_cq = 0x04,
226 nvme_admin_create_cq = 0x05,
227 nvme_admin_identify = 0x06,
228 nvme_admin_abort_cmd = 0x08,
229 nvme_admin_set_features = 0x09,
230 nvme_admin_get_features = 0x0a,
231 nvme_admin_async_event = 0x0c,
232 nvme_admin_activate_fw = 0x10,
233 nvme_admin_download_fw = 0x11,
234 nvme_admin_format_nvm = 0x80,
235 nvme_admin_security_send = 0x81,
236 nvme_admin_security_recv = 0x82,
237};
238
239enum {
240 NVME_QUEUE_PHYS_CONTIG = (1 << 0),
241 NVME_CQ_IRQ_ENABLED = (1 << 1),
242 NVME_SQ_PRIO_URGENT = (0 << 1),
243 NVME_SQ_PRIO_HIGH = (1 << 1),
244 NVME_SQ_PRIO_MEDIUM = (2 << 1),
245 NVME_SQ_PRIO_LOW = (3 << 1),
246 NVME_FEAT_ARBITRATION = 0x01,
247 NVME_FEAT_POWER_MGMT = 0x02,
248 NVME_FEAT_LBA_RANGE = 0x03,
249 NVME_FEAT_TEMP_THRESH = 0x04,
250 NVME_FEAT_ERR_RECOVERY = 0x05,
251 NVME_FEAT_VOLATILE_WC = 0x06,
252 NVME_FEAT_NUM_QUEUES = 0x07,
253 NVME_FEAT_IRQ_COALESCE = 0x08,
254 NVME_FEAT_IRQ_CONFIG = 0x09,
255 NVME_FEAT_WRITE_ATOMIC = 0x0a,
256 NVME_FEAT_ASYNC_EVENT = 0x0b,
257 NVME_FEAT_SW_PROGRESS = 0x0c,
258};
259
260struct nvme_identify {
261 __u8 opcode;
262 __u8 flags;
263 __u16 command_id;
264 __le32 nsid;
265 __u64 rsvd2[2];
266 __le64 prp1;
267 __le64 prp2;
268 __le32 cns;
269 __u32 rsvd11[5];
270};
271
272struct nvme_features {
273 __u8 opcode;
274 __u8 flags;
275 __u16 command_id;
276 __le32 nsid;
277 __u64 rsvd2[2];
278 __le64 prp1;
279 __le64 prp2;
280 __le32 fid;
281 __le32 dword11;
282 __u32 rsvd12[4];
283};
284
285struct nvme_create_cq {
286 __u8 opcode;
287 __u8 flags;
288 __u16 command_id;
289 __u32 rsvd1[5];
290 __le64 prp1;
291 __u64 rsvd8;
292 __le16 cqid;
293 __le16 qsize;
294 __le16 cq_flags;
295 __le16 irq_vector;
296 __u32 rsvd12[4];
297};
298
299struct nvme_create_sq {
300 __u8 opcode;
301 __u8 flags;
302 __u16 command_id;
303 __u32 rsvd1[5];
304 __le64 prp1;
305 __u64 rsvd8;
306 __le16 sqid;
307 __le16 qsize;
308 __le16 sq_flags;
309 __le16 cqid;
310 __u32 rsvd12[4];
311};
312
313struct nvme_delete_queue {
314 __u8 opcode;
315 __u8 flags;
316 __u16 command_id;
317 __u32 rsvd1[9];
318 __le16 qid;
319 __u16 rsvd10;
320 __u32 rsvd11[5];
321};
322
323struct nvme_download_firmware {
324 __u8 opcode;
325 __u8 flags;
326 __u16 command_id;
327 __u32 rsvd1[5];
328 __le64 prp1;
329 __le64 prp2;
330 __le32 numd;
331 __le32 offset;
332 __u32 rsvd12[4];
333};
334
335struct nvme_command {
336 union {
337 struct nvme_common_command common;
338 struct nvme_rw_command rw;
339 struct nvme_identify identify;
340 struct nvme_features features;
341 struct nvme_create_cq create_cq;
342 struct nvme_create_sq create_sq;
343 struct nvme_delete_queue delete_queue;
344 struct nvme_download_firmware dlfw;
345 };
346};
347
348enum {
349 NVME_SC_SUCCESS = 0x0,
350 NVME_SC_INVALID_OPCODE = 0x1,
351 NVME_SC_INVALID_FIELD = 0x2,
352 NVME_SC_CMDID_CONFLICT = 0x3,
353 NVME_SC_DATA_XFER_ERROR = 0x4,
354 NVME_SC_POWER_LOSS = 0x5,
355 NVME_SC_INTERNAL = 0x6,
356 NVME_SC_ABORT_REQ = 0x7,
357 NVME_SC_ABORT_QUEUE = 0x8,
358 NVME_SC_FUSED_FAIL = 0x9,
359 NVME_SC_FUSED_MISSING = 0xa,
360 NVME_SC_INVALID_NS = 0xb,
361 NVME_SC_LBA_RANGE = 0x80,
362 NVME_SC_CAP_EXCEEDED = 0x81,
363 NVME_SC_NS_NOT_READY = 0x82,
364 NVME_SC_CQ_INVALID = 0x100,
365 NVME_SC_QID_INVALID = 0x101,
366 NVME_SC_QUEUE_SIZE = 0x102,
367 NVME_SC_ABORT_LIMIT = 0x103,
368 NVME_SC_ABORT_MISSING = 0x104,
369 NVME_SC_ASYNC_LIMIT = 0x105,
370 NVME_SC_FIRMWARE_SLOT = 0x106,
371 NVME_SC_FIRMWARE_IMAGE = 0x107,
372 NVME_SC_INVALID_VECTOR = 0x108,
373 NVME_SC_INVALID_LOG_PAGE = 0x109,
374 NVME_SC_INVALID_FORMAT = 0x10a,
375 NVME_SC_BAD_ATTRIBUTES = 0x180,
376 NVME_SC_WRITE_FAULT = 0x280,
377 NVME_SC_READ_ERROR = 0x281,
378 NVME_SC_GUARD_CHECK = 0x282,
379 NVME_SC_APPTAG_CHECK = 0x283,
380 NVME_SC_REFTAG_CHECK = 0x284,
381 NVME_SC_COMPARE_FAILED = 0x285,
382 NVME_SC_ACCESS_DENIED = 0x286,
383};
384
385struct nvme_completion {
386 __le32 result; /* Used by admin commands to return data */
387 __u32 rsvd;
388 __le16 sq_head; /* how much of this queue may be reclaimed */
389 __le16 sq_id; /* submission queue that generated this entry */
390 __u16 command_id; /* of the command which completed */
391 __le16 status; /* did the command fail, and if so, why? */
392};
393
394struct nvme_user_io {
395 __u8 opcode;
396 __u8 flags;
397 __u16 control;
398 __u16 nblocks;
399 __u16 rsvd;
400 __u64 metadata;
401 __u64 addr;
402 __u64 slba;
403 __u32 dsmgmt;
404 __u32 reftag;
405 __u16 apptag;
406 __u16 appmask;
407};
408
409struct nvme_admin_cmd {
410 __u8 opcode;
411 __u8 flags;
412 __u16 rsvd1;
413 __u32 nsid;
414 __u32 cdw2;
415 __u32 cdw3;
416 __u64 metadata;
417 __u64 addr;
418 __u32 metadata_len;
419 __u32 data_len;
420 __u32 cdw10;
421 __u32 cdw11;
422 __u32 cdw12;
423 __u32 cdw13;
424 __u32 cdw14;
425 __u32 cdw15;
426 __u32 timeout_ms;
427 __u32 result;
428};
429
430#define NVME_IOCTL_ID _IO('N', 0x40)
431#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
432#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
433
434#endif /* _LINUX_NVME_H */
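
The three ioctls at the bottom are the entire userspace surface of this header. A hedged example of issuing an Identify Controller admin command through NVME_IOCTL_ADMIN_CMD; the /dev/nvme0 node and the CNS=1 encoding in cdw10 follow the NVMe 1.0 spec and are assumptions about the running system:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme.h>

int main(void)
{
	struct nvme_admin_cmd cmd;
	struct nvme_id_ctrl *id;
	int fd = open("/dev/nvme0", O_RDONLY);

	if (fd < 0) { perror("open"); return 1; }

	id = calloc(1, sizeof(*id));		/* 4096-byte identify buffer */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = nvme_admin_identify;	/* 0x06 */
	cmd.addr = (__u64)(uintptr_t)id;
	cmd.data_len = sizeof(*id);
	cmd.cdw10 = 1;				/* CNS=1: identify controller */

	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0) {
		perror("NVME_IOCTL_ADMIN_CMD");
		return 1;
	}
	printf("model: %.40s serial: %.20s\n", id->mn, id->sn);
	return 0;
}
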
diff --git a/include/linux/quota.h b/include/linux/quota.h
index cb7855699037..c09fa042b5ea 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -230,7 +230,11 @@ struct mem_dqinfo {
230struct super_block; 230struct super_block;
231 231
232#define DQF_MASK 0xffff /* Mask for format specific flags */ 232#define DQF_MASK 0xffff /* Mask for format specific flags */
233#define DQF_INFO_DIRTY_B 16 233#define DQF_GETINFO_MASK 0x1ffff /* Mask for flags passed to userspace */
234#define DQF_SETINFO_MASK 0xffff /* Mask for flags modifiable from userspace */
235#define DQF_SYS_FILE_B 16
236#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B) /* Quota file stored as system file */
237#define DQF_INFO_DIRTY_B 31
234#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */ 238#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
235 239
236extern void mark_info_dirty(struct super_block *sb, int type); 240extern void mark_info_dirty(struct super_block *sb, int type);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4032ec1cf836..513f52459872 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2088,7 +2088,7 @@ extern int sched_setscheduler_nocheck(struct task_struct *, int,
2088extern struct task_struct *idle_task(int cpu); 2088extern struct task_struct *idle_task(int cpu);
2089/** 2089/**
2090 * is_idle_task - is the specified task an idle task? 2090 * is_idle_task - is the specified task an idle task?
2091 * @tsk: the task in question. 2091 * @p: the task in question.
2092 */ 2092 */
2093static inline bool is_idle_task(struct task_struct *p) 2093static inline bool is_idle_task(struct task_struct *p)
2094{ 2094{
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index e4c711c6f321..79ab2555b3b0 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
48 loff_t size, unsigned long flags); 48 loff_t size, unsigned long flags);
49extern int shmem_zero_setup(struct vm_area_struct *); 49extern int shmem_zero_setup(struct vm_area_struct *);
50extern int shmem_lock(struct file *file, int lock, struct user_struct *user); 50extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
51extern void shmem_unlock_mapping(struct address_space *mapping);
51extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 52extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
52 pgoff_t index, gfp_t gfp_mask); 53 pgoff_t index, gfp_t gfp_mask);
53extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); 54extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 95040cc33107..91784a4f8608 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -357,14 +357,29 @@ extern bool pm_save_wakeup_count(unsigned int count);
357 357
358static inline void lock_system_sleep(void) 358static inline void lock_system_sleep(void)
359{ 359{
360 freezer_do_not_count(); 360 current->flags |= PF_FREEZER_SKIP;
361 mutex_lock(&pm_mutex); 361 mutex_lock(&pm_mutex);
362} 362}
363 363
364static inline void unlock_system_sleep(void) 364static inline void unlock_system_sleep(void)
365{ 365{
366 /*
367 * Don't use freezer_count() because we don't want the call to
368 * try_to_freeze() here.
369 *
370 * Reason:
371 * Fundamentally, we just don't need it, because freezing condition
372 * doesn't come into effect until we release the pm_mutex lock,
373 * since the freezer always works with pm_mutex held.
374 *
375 * More importantly, in the case of hibernation,
376 * unlock_system_sleep() gets called in snapshot_read() and
377 * snapshot_write() when the freezing condition is still in effect.
378 * Which means, if we use try_to_freeze() here, it would make them
379 * enter the refrigerator, thus causing hibernation to lockup.
380 */
381 current->flags &= ~PF_FREEZER_SKIP;
366 mutex_unlock(&pm_mutex); 382 mutex_unlock(&pm_mutex);
367 freezer_count();
368} 383}
369 384
370#else /* !CONFIG_PM_SLEEP */ 385#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 06061a7f8e69..3e60228e7299 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -273,7 +273,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
273#endif 273#endif
274 274
275extern int page_evictable(struct page *page, struct vm_area_struct *vma); 275extern int page_evictable(struct page *page, struct vm_area_struct *vma);
276extern void scan_mapping_unevictable_pages(struct address_space *); 276extern void check_move_unevictable_pages(struct page **, int nr_pages);
277 277
278extern unsigned long scan_unevictable_pages; 278extern unsigned long scan_unevictable_pages;
279extern int scan_unevictable_handler(struct ctl_table *, int, 279extern int scan_unevictable_handler(struct ctl_table *, int,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 27a4e16d2bf1..69d845739bc2 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1073,6 +1073,7 @@ typedef void (*usb_complete_t)(struct urb *);
1073 * which the host controller driver should use in preference to the 1073 * which the host controller driver should use in preference to the
1074 * transfer_buffer. 1074 * transfer_buffer.
1075 * @sg: scatter gather buffer list 1075 * @sg: scatter gather buffer list
1076 * @num_mapped_sgs: (internal) number of mapped sg entries
1076 * @num_sgs: number of entries in the sg list 1077 * @num_sgs: number of entries in the sg list
1077 * @transfer_buffer_length: How big is transfer_buffer. The transfer may 1078 * @transfer_buffer_length: How big is transfer_buffer. The transfer may
1078 * be broken up into chunks according to the current maximum packet 1079 * be broken up into chunks according to the current maximum packet
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 89c290b69a5c..29e1920e7339 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -127,7 +127,6 @@
127#define TUNER_PHILIPS_FMD1216MEX_MK3 78 127#define TUNER_PHILIPS_FMD1216MEX_MK3 78
128#define TUNER_PHILIPS_FM1216MK5 79 128#define TUNER_PHILIPS_FM1216MK5 79
129#define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */ 129#define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */
130#define TUNER_XC4000 81 /* Xceive Silicon Tuner */
131 130
132#define TUNER_PARTSNIC_PTI_5NF05 81 131#define TUNER_PARTSNIC_PTI_5NF05 81
133#define TUNER_PHILIPS_CU1216L 82 132#define TUNER_PHILIPS_CU1216L 82
@@ -136,6 +135,8 @@
136#define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */ 135#define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */
137#define TUNER_TENA_TNF_5337 86 136#define TUNER_TENA_TNF_5337 86
138 137
138#define TUNER_XC4000 87 /* Xceive Silicon Tuner */
139
139/* tv card specific */ 140/* tv card specific */
140#define TDA9887_PRESENT (1<<0) 141#define TDA9887_PRESENT (1<<0)
141#define TDA9887_PORT1_INACTIVE (1<<1) 142#define TDA9887_PORT1_INACTIVE (1<<1)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 15f4be7d768e..a067d30ce73e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1140,6 +1140,7 @@ struct cfg80211_disassoc_request {
1140 * @bssid: Fixed BSSID requested, maybe be %NULL, if set do not 1140 * @bssid: Fixed BSSID requested, maybe be %NULL, if set do not
1141 * search for IBSSs with a different BSSID. 1141 * search for IBSSs with a different BSSID.
1142 * @channel: The channel to use if no IBSS can be found to join. 1142 * @channel: The channel to use if no IBSS can be found to join.
1143 * @channel_type: channel type (HT mode)
1143 * @channel_fixed: The channel should be fixed -- do not search for 1144 * @channel_fixed: The channel should be fixed -- do not search for
1144 * IBSSs to join on other channels. 1145 * IBSSs to join on other channels.
1145 * @ie: information element(s) to include in the beacon 1146 * @ie: information element(s) to include in the beacon
@@ -1978,6 +1979,11 @@ struct wiphy_wowlan_support {
1978 * configured as RX antennas. Antenna configuration commands will be 1979 * configured as RX antennas. Antenna configuration commands will be
1979 * rejected unless this or @available_antennas_tx is set. 1980 * rejected unless this or @available_antennas_tx is set.
1980 * 1981 *
1982 * @probe_resp_offload:
1983 * Bitmap of supported protocols for probe response offloading.
1984 * See &enum nl80211_probe_resp_offload_support_attr. Only valid
1985 * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
1986 *
1981 * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation 1987 * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation
1982 * may request, if implemented. 1988 * may request, if implemented.
1983 * 1989 *
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 5d1a758e0595..6a3922fe0be0 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -857,7 +857,7 @@ struct fc_lport {
857 enum fc_lport_state state; 857 enum fc_lport_state state;
858 unsigned long boot_time; 858 unsigned long boot_time;
859 struct fc_host_statistics host_stats; 859 struct fc_host_statistics host_stats;
860 struct fcoe_dev_stats *dev_stats; 860 struct fcoe_dev_stats __percpu *dev_stats;
861 u8 retry_count; 861 u8 retry_count;
862 862
863 /* Fabric information */ 863 /* Fabric information */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
new file mode 100644
index 000000000000..4866499bdeeb
--- /dev/null
+++ b/include/target/target_core_backend.h
@@ -0,0 +1,65 @@
1#ifndef TARGET_CORE_BACKEND_H
2#define TARGET_CORE_BACKEND_H
3
4#define TRANSPORT_PLUGIN_PHBA_PDEV 1
5#define TRANSPORT_PLUGIN_VHBA_PDEV 2
6#define TRANSPORT_PLUGIN_VHBA_VDEV 3
7
8struct se_subsystem_api {
9 struct list_head sub_api_list;
10
11 char name[16];
12 struct module *owner;
13
14 u8 transport_type;
15
16 unsigned int fua_write_emulated : 1;
17 unsigned int write_cache_emulated : 1;
18
19 int (*attach_hba)(struct se_hba *, u32);
20 void (*detach_hba)(struct se_hba *);
21 int (*pmode_enable_hba)(struct se_hba *, unsigned long);
22 void *(*allocate_virtdevice)(struct se_hba *, const char *);
23 struct se_device *(*create_virtdevice)(struct se_hba *,
24 struct se_subsystem_dev *, void *);
25 void (*free_device)(void *);
26 int (*transport_complete)(struct se_task *task);
27 struct se_task *(*alloc_task)(unsigned char *cdb);
28 int (*do_task)(struct se_task *);
29 int (*do_discard)(struct se_device *, sector_t, u32);
30 void (*do_sync_cache)(struct se_task *);
31 void (*free_task)(struct se_task *);
32 ssize_t (*check_configfs_dev_params)(struct se_hba *,
33 struct se_subsystem_dev *);
34 ssize_t (*set_configfs_dev_params)(struct se_hba *,
35 struct se_subsystem_dev *, const char *, ssize_t);
36 ssize_t (*show_configfs_dev_params)(struct se_hba *,
37 struct se_subsystem_dev *, char *);
38 u32 (*get_device_rev)(struct se_device *);
39 u32 (*get_device_type)(struct se_device *);
40 sector_t (*get_blocks)(struct se_device *);
41 unsigned char *(*get_sense_buffer)(struct se_task *);
42};
43
44int transport_subsystem_register(struct se_subsystem_api *);
45void transport_subsystem_release(struct se_subsystem_api *);
46
47struct se_device *transport_add_device_to_core_hba(struct se_hba *,
48 struct se_subsystem_api *, struct se_subsystem_dev *, u32,
49 void *, struct se_dev_limits *, const char *, const char *);
50
51void transport_complete_sync_cache(struct se_cmd *, int);
52void transport_complete_task(struct se_task *, int);
53
54void target_get_task_cdb(struct se_task *, unsigned char *);
55
56void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
57int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
58int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
59int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
60
61/* core helpers also used by command snooping in pscsi */
62void *transport_kmap_first_data_page(struct se_cmd *);
63void transport_kunmap_first_data_page(struct se_cmd *);
64
65#endif /* TARGET_CORE_BACKEND_H */
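
A backend plugs into the core by filling in struct se_subsystem_api and calling transport_subsystem_register() at module load. A bare-bones sketch — every my_* name is hypothetical, and a real backend must implement the callbacks listed in the structure above; only the registration flow is shown:

#include <linux/module.h>
#include <target/target_core_backend.h>

static struct se_subsystem_api my_backend_api = {
	.name			= "my_backend",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	/* .attach_hba, .create_virtdevice, .do_task, ... go here */
};

static int __init my_backend_init(void)
{
	return transport_subsystem_register(&my_backend_api);
}

static void __exit my_backend_exit(void)
{
	transport_subsystem_release(&my_backend_api);
}

module_init(my_backend_init);
module_exit(my_backend_exit);
MODULE_LICENSE("GPL");
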
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6873c7dd9145..daf532bc721a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -10,6 +10,7 @@
10#include <net/tcp.h> 10#include <net/tcp.h>
11 11
12#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml" 12#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
13#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
13 14
14/* Maximum Number of LUNs per Target Portal Group */ 15/* Maximum Number of LUNs per Target Portal Group */
15/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */ 16/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
@@ -34,6 +35,7 @@
34#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE 35#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
35/* Used by transport_send_check_condition_and_sense() */ 36/* Used by transport_send_check_condition_and_sense() */
36#define SPC_SENSE_KEY_OFFSET 2 37#define SPC_SENSE_KEY_OFFSET 2
38#define SPC_ADD_SENSE_LEN_OFFSET 7
37#define SPC_ASC_KEY_OFFSET 12 39#define SPC_ASC_KEY_OFFSET 12
38#define SPC_ASCQ_KEY_OFFSET 13 40#define SPC_ASCQ_KEY_OFFSET 13
39#define TRANSPORT_IQN_LEN 224 41#define TRANSPORT_IQN_LEN 224
@@ -53,6 +55,72 @@
53/* Used by transport_get_inquiry_vpd_device_ident() */ 55/* Used by transport_get_inquiry_vpd_device_ident() */
54#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 56#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
55 57
58/* Attempts before moving from SHORT to LONG */
59#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
60#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
61#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
62
63#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
64
65/*
66 * struct se_subsystem_dev->su_dev_flags
67*/
68#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
69#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
70#define SDF_USING_UDEV_PATH 0x00000004
71#define SDF_USING_ALIAS 0x00000008
72
73/*
74 * struct se_device->dev_flags
75 */
76#define DF_READ_ONLY 0x00000001
77#define DF_SPC2_RESERVATIONS 0x00000002
78#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
79
80/* struct se_dev_attrib sanity values */
81/* Default max_unmap_lba_count */
82#define DA_MAX_UNMAP_LBA_COUNT 0
83/* Default max_unmap_block_desc_count */
84#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
85/* Default unmap_granularity */
86#define DA_UNMAP_GRANULARITY_DEFAULT 0
87/* Default unmap_granularity_alignment */
88#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
89/* Emulation for Direct Page Out */
90#define DA_EMULATE_DPO 0
91/* Emulation for Forced Unit Access WRITEs */
92#define DA_EMULATE_FUA_WRITE 1
93/* Emulation for Forced Unit Access READs */
94#define DA_EMULATE_FUA_READ 0
95/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
96#define DA_EMULATE_WRITE_CACHE 0
97/* Emulation for UNIT ATTENTION Interlock Control */
98#define DA_EMULATE_UA_INTLLCK_CTRL 0
99/* Emulation for TASK_ABORTED status (TAS) by default */
100#define DA_EMULATE_TAS 1
101/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
102#define DA_EMULATE_TPU 0
103/*
104 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
105 * block/blk-lib.c:blkdev_issue_discard()
106 */
107#define DA_EMULATE_TPWS 0
108/* No Emulation for PSCSI by default */
109#define DA_EMULATE_RESERVATIONS 0
110/* No Emulation for PSCSI by default */
111#define DA_EMULATE_ALUA 0
112/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
113#define DA_ENFORCE_PR_ISIDS 1
114#define DA_STATUS_MAX_SECTORS_MIN 16
115#define DA_STATUS_MAX_SECTORS_MAX 8192
116/* By default don't report non-rotating (solid state) medium */
117#define DA_IS_NONROT 0
118/* Queue Algorithm Modifier default for restricted reordering in control mode page */
119#define DA_EMULATE_REST_REORD 0
120
121#define SE_MODE_PAGE_BUF 512
122
123
56/* struct se_hba->hba_flags */ 124/* struct se_hba->hba_flags */
57enum hba_flags_table { 125enum hba_flags_table {
58 HBA_FLAGS_INTERNAL_USE = 0x01, 126 HBA_FLAGS_INTERNAL_USE = 0x01,
@@ -71,11 +139,12 @@ enum transport_tpg_type_table {
71 TRANSPORT_TPG_TYPE_DISCOVERY = 1, 139 TRANSPORT_TPG_TYPE_DISCOVERY = 1,
72}; 140};
73 141
74/* Used for generate timer flags */ 142/* struct se_task->task_flags */
75enum se_task_flags { 143enum se_task_flags {
76 TF_ACTIVE = (1 << 0), 144 TF_ACTIVE = (1 << 0),
77 TF_SENT = (1 << 1), 145 TF_SENT = (1 << 1),
78 TF_REQUEST_STOP = (1 << 2), 146 TF_REQUEST_STOP = (1 << 2),
147 TF_HAS_SENSE = (1 << 3),
79}; 148};
80 149
81/* Special transport agnostic struct se_cmd->t_states */ 150/* Special transport agnostic struct se_cmd->t_states */
@@ -158,9 +227,38 @@ enum tcm_sense_reason_table {
158 TCM_RESERVATION_CONFLICT = 0x10, 227 TCM_RESERVATION_CONFLICT = 0x10,
159}; 228};
160 229
230enum target_sc_flags_table {
231 TARGET_SCF_BIDI_OP = 0x01,
232 TARGET_SCF_ACK_KREF = 0x02,
233};
234
235/* fabric independent task management function values */
236enum tcm_tmreq_table {
237 TMR_ABORT_TASK = 1,
238 TMR_ABORT_TASK_SET = 2,
239 TMR_CLEAR_ACA = 3,
240 TMR_CLEAR_TASK_SET = 4,
241 TMR_LUN_RESET = 5,
242 TMR_TARGET_WARM_RESET = 6,
243 TMR_TARGET_COLD_RESET = 7,
244 TMR_FABRIC_TMR = 255,
245};
246
247/* fabric independent task management response values */
248enum tcm_tmrsp_table {
249 TMR_FUNCTION_COMPLETE = 0,
250 TMR_TASK_DOES_NOT_EXIST = 1,
251 TMR_LUN_DOES_NOT_EXIST = 2,
252 TMR_TASK_STILL_ALLEGIANT = 3,
253 TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
254 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
255 TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
256 TMR_FUNCTION_REJECTED = 255,
257};
258
161struct se_obj { 259struct se_obj {
162 atomic_t obj_access_count; 260 atomic_t obj_access_count;
163} ____cacheline_aligned; 261};
164 262
165/* 263/*
166 * Used by TCM Core internally to signal if ALUA emulation is enabled or 264 * Used by TCM Core internally to signal if ALUA emulation is enabled or
@@ -207,7 +305,7 @@ struct t10_alua {
207 struct config_group alua_tg_pt_gps_group; 305 struct config_group alua_tg_pt_gps_group;
208 int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *); 306 int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
209 struct list_head tg_pt_gps_list; 307 struct list_head tg_pt_gps_list;
210} ____cacheline_aligned; 308};
211 309
212struct t10_alua_lu_gp { 310struct t10_alua_lu_gp {
213 u16 lu_gp_id; 311 u16 lu_gp_id;
@@ -218,7 +316,7 @@ struct t10_alua_lu_gp {
218 struct config_group lu_gp_group; 316 struct config_group lu_gp_group;
219 struct list_head lu_gp_node; 317 struct list_head lu_gp_node;
220 struct list_head lu_gp_mem_list; 318 struct list_head lu_gp_mem_list;
221} ____cacheline_aligned; 319};
222 320
223struct t10_alua_lu_gp_member { 321struct t10_alua_lu_gp_member {
224 bool lu_gp_assoc; 322 bool lu_gp_assoc;
@@ -227,7 +325,7 @@ struct t10_alua_lu_gp_member {
227 struct t10_alua_lu_gp *lu_gp; 325 struct t10_alua_lu_gp *lu_gp;
228 struct se_device *lu_gp_mem_dev; 326 struct se_device *lu_gp_mem_dev;
229 struct list_head lu_gp_mem_list; 327 struct list_head lu_gp_mem_list;
230} ____cacheline_aligned; 328};
231 329
232struct t10_alua_tg_pt_gp { 330struct t10_alua_tg_pt_gp {
233 u16 tg_pt_gp_id; 331 u16 tg_pt_gp_id;
@@ -250,7 +348,7 @@ struct t10_alua_tg_pt_gp {
250 struct config_group tg_pt_gp_group; 348 struct config_group tg_pt_gp_group;
251 struct list_head tg_pt_gp_list; 349 struct list_head tg_pt_gp_list;
252 struct list_head tg_pt_gp_mem_list; 350 struct list_head tg_pt_gp_mem_list;
253} ____cacheline_aligned; 351};
254 352
255struct t10_alua_tg_pt_gp_member { 353struct t10_alua_tg_pt_gp_member {
256 bool tg_pt_gp_assoc; 354 bool tg_pt_gp_assoc;
@@ -259,7 +357,7 @@ struct t10_alua_tg_pt_gp_member {
259 struct t10_alua_tg_pt_gp *tg_pt_gp; 357 struct t10_alua_tg_pt_gp *tg_pt_gp;
260 struct se_port *tg_pt; 358 struct se_port *tg_pt;
261 struct list_head tg_pt_gp_mem_list; 359 struct list_head tg_pt_gp_mem_list;
262} ____cacheline_aligned; 360};
263 361
264struct t10_vpd { 362struct t10_vpd {
265 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; 363 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
@@ -269,7 +367,7 @@ struct t10_vpd {
269 u32 association; 367 u32 association;
270 u32 device_identifier_type; 368 u32 device_identifier_type;
271 struct list_head vpd_list; 369 struct list_head vpd_list;
272} ____cacheline_aligned; 370};
273 371
274struct t10_wwn { 372struct t10_wwn {
275 char vendor[8]; 373 char vendor[8];
@@ -280,7 +378,7 @@ struct t10_wwn {
280 struct se_subsystem_dev *t10_sub_dev; 378 struct se_subsystem_dev *t10_sub_dev;
281 struct config_group t10_wwn_group; 379 struct config_group t10_wwn_group;
282 struct list_head t10_vpd_list; 380 struct list_head t10_vpd_list;
283} ____cacheline_aligned; 381};
284 382
285 383
286/* 384/*
@@ -333,7 +431,7 @@ struct t10_pr_registration {
333 struct list_head pr_reg_aptpl_list; 431 struct list_head pr_reg_aptpl_list;
334 struct list_head pr_reg_atp_list; 432 struct list_head pr_reg_atp_list;
335 struct list_head pr_reg_atp_mem_list; 433 struct list_head pr_reg_atp_mem_list;
336} ____cacheline_aligned; 434};
337 435
338/* 436/*
339 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS, 437 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
@@ -374,20 +472,20 @@ struct t10_reservation {
374 struct list_head registration_list; 472 struct list_head registration_list;
375 struct list_head aptpl_reg_list; 473 struct list_head aptpl_reg_list;
376 struct t10_reservation_ops pr_ops; 474 struct t10_reservation_ops pr_ops;
377} ____cacheline_aligned; 475};
378 476
379struct se_queue_req { 477struct se_queue_req {
380 int state; 478 int state;
381 struct se_cmd *cmd; 479 struct se_cmd *cmd;
382 struct list_head qr_list; 480 struct list_head qr_list;
383} ____cacheline_aligned; 481};
384 482
385struct se_queue_obj { 483struct se_queue_obj {
386 atomic_t queue_cnt; 484 atomic_t queue_cnt;
387 spinlock_t cmd_queue_lock; 485 spinlock_t cmd_queue_lock;
388 struct list_head qobj_list; 486 struct list_head qobj_list;
389 wait_queue_head_t thread_wq; 487 wait_queue_head_t thread_wq;
390} ____cacheline_aligned; 488};
391 489
392struct se_task { 490struct se_task {
393 unsigned long long task_lba; 491 unsigned long long task_lba;
@@ -397,16 +495,14 @@ struct se_task {
397 struct scatterlist *task_sg; 495 struct scatterlist *task_sg;
398 u32 task_sg_nents; 496 u32 task_sg_nents;
399 u16 task_flags; 497 u16 task_flags;
400 u8 task_sense;
401 u8 task_scsi_status; 498 u8 task_scsi_status;
402 int task_error_status;
403 enum dma_data_direction task_data_direction; 499 enum dma_data_direction task_data_direction;
404 atomic_t task_state_active;
405 struct list_head t_list; 500 struct list_head t_list;
406 struct list_head t_execute_list; 501 struct list_head t_execute_list;
407 struct list_head t_state_list; 502 struct list_head t_state_list;
503 bool t_state_active;
408 struct completion task_stop_comp; 504 struct completion task_stop_comp;
409} ____cacheline_aligned; 505};
410 506
411struct se_cmd { 507struct se_cmd {
412 /* SAM response code being sent to initiator */ 508 /* SAM response code being sent to initiator */
@@ -451,6 +547,7 @@ struct se_cmd {
451 struct list_head se_queue_node; 547 struct list_head se_queue_node;
452 struct list_head se_cmd_list; 548 struct list_head se_cmd_list;
453 struct completion cmd_wait_comp; 549 struct completion cmd_wait_comp;
550 struct kref cmd_kref;
454 struct target_core_fabric_ops *se_tfo; 551 struct target_core_fabric_ops *se_tfo;
455 int (*execute_task)(struct se_task *); 552 int (*execute_task)(struct se_task *);
456 void (*transport_complete_callback)(struct se_cmd *); 553 void (*transport_complete_callback)(struct se_cmd *);
@@ -492,7 +589,7 @@ struct se_cmd {
492 struct list_head t_task_list; 589 struct list_head t_task_list;
493 u32 t_task_list_num; 590 u32 t_task_list_num;
494 591
495} ____cacheline_aligned; 592};
496 593
497struct se_tmr_req { 594struct se_tmr_req {
498 /* Task Management function to be performed */ 595 /* Task Management function to be performed */
@@ -510,7 +607,7 @@ struct se_tmr_req {
510 struct se_device *tmr_dev; 607 struct se_device *tmr_dev;
511 struct se_lun *tmr_lun; 608 struct se_lun *tmr_lun;
512 struct list_head tmr_list; 609 struct list_head tmr_list;
513} ____cacheline_aligned; 610};
514 611
515struct se_ua { 612struct se_ua {
516 u8 ua_asc; 613 u8 ua_asc;
@@ -518,7 +615,7 @@ struct se_ua {
518 struct se_node_acl *ua_nacl; 615 struct se_node_acl *ua_nacl;
519 struct list_head ua_dev_list; 616 struct list_head ua_dev_list;
520 struct list_head ua_nacl_list; 617 struct list_head ua_nacl_list;
521} ____cacheline_aligned; 618};
522 619
523struct se_node_acl { 620struct se_node_acl {
524 char initiatorname[TRANSPORT_IQN_LEN]; 621 char initiatorname[TRANSPORT_IQN_LEN];
@@ -545,7 +642,7 @@ struct se_node_acl {
545 struct config_group *acl_default_groups[5]; 642 struct config_group *acl_default_groups[5];
546 struct list_head acl_list; 643 struct list_head acl_list;
547 struct list_head acl_sess_list; 644 struct list_head acl_sess_list;
548} ____cacheline_aligned; 645};
549 646
550struct se_session { 647struct se_session {
551 unsigned sess_tearing_down:1; 648 unsigned sess_tearing_down:1;
@@ -558,7 +655,7 @@ struct se_session {
558 struct list_head sess_cmd_list; 655 struct list_head sess_cmd_list;
559 struct list_head sess_wait_list; 656 struct list_head sess_wait_list;
560 spinlock_t sess_cmd_lock; 657 spinlock_t sess_cmd_lock;
561} ____cacheline_aligned; 658};
562 659
563struct se_device; 660struct se_device;
564struct se_transform_info; 661struct se_transform_info;
@@ -578,7 +675,7 @@ struct se_lun_acl {
578 struct list_head lacl_list; 675 struct list_head lacl_list;
579 struct config_group se_lun_group; 676 struct config_group se_lun_group;
580 struct se_ml_stat_grps ml_stat_grps; 677 struct se_ml_stat_grps ml_stat_grps;
581} ____cacheline_aligned; 678};
582 679
583struct se_dev_entry { 680struct se_dev_entry {
584 bool def_pr_registered; 681 bool def_pr_registered;
@@ -603,7 +700,7 @@ struct se_dev_entry {
603 struct se_lun *se_lun; 700 struct se_lun *se_lun;
604 struct list_head alua_port_list; 701 struct list_head alua_port_list;
605 struct list_head ua_list; 702 struct list_head ua_list;
606} ____cacheline_aligned; 703};
607 704
608struct se_dev_limits { 705struct se_dev_limits {
609 /* Max supported HW queue depth */ 706 /* Max supported HW queue depth */
@@ -612,7 +709,7 @@ struct se_dev_limits {
612 u32 queue_depth; 709 u32 queue_depth;
613 /* From include/linux/blkdev.h for the other HW/SW limits. */ 710 /* From include/linux/blkdev.h for the other HW/SW limits. */
614 struct queue_limits limits; 711 struct queue_limits limits;
615} ____cacheline_aligned; 712};
616 713
617struct se_dev_attrib { 714struct se_dev_attrib {
618 int emulate_dpo; 715 int emulate_dpo;
@@ -641,7 +738,7 @@ struct se_dev_attrib {
641 u32 unmap_granularity_alignment; 738 u32 unmap_granularity_alignment;
642 struct se_subsystem_dev *da_sub_dev; 739 struct se_subsystem_dev *da_sub_dev;
643 struct config_group da_group; 740 struct config_group da_group;
644} ____cacheline_aligned; 741};
645 742
646struct se_dev_stat_grps { 743struct se_dev_stat_grps {
647 struct config_group stat_group; 744 struct config_group stat_group;
@@ -674,7 +771,7 @@ struct se_subsystem_dev {
674 struct config_group se_dev_pr_group; 771 struct config_group se_dev_pr_group;
675 /* For target_core_stat.c groups */ 772 /* For target_core_stat.c groups */
676 struct se_dev_stat_grps dev_stat_grps; 773 struct se_dev_stat_grps dev_stat_grps;
677} ____cacheline_aligned; 774};
678 775
679struct se_device { 776struct se_device {
680 /* RELATIVE TARGET PORT IDENTIFIER Counter */ 777 /* RELATIVE TARGET PORT IDENTIFIER Counter */
@@ -685,7 +782,6 @@ struct se_device {
685 u32 dev_port_count; 782 u32 dev_port_count;
686 /* See transport_device_status_table */ 783 /* See transport_device_status_table */
687 u32 dev_status; 784 u32 dev_status;
688 u32 dev_tcq_window_closed;
689 /* Physical device queue depth */ 785 /* Physical device queue depth */
690 u32 queue_depth; 786 u32 queue_depth;
691 /* Used for SPC-2 reservations enforce of ISIDs */ 787 /* Used for SPC-2 reservations enforce of ISIDs */
@@ -702,7 +798,6 @@ struct se_device {
702 spinlock_t stats_lock; 798 spinlock_t stats_lock;
703 /* Active commands on this virtual SE device */ 799 /* Active commands on this virtual SE device */
704 atomic_t simple_cmds; 800 atomic_t simple_cmds;
705 atomic_t depth_left;
706 atomic_t dev_ordered_id; 801 atomic_t dev_ordered_id;
707 atomic_t execute_tasks; 802 atomic_t execute_tasks;
708 atomic_t dev_ordered_sync; 803 atomic_t dev_ordered_sync;
@@ -740,7 +835,7 @@ struct se_device {
740 struct se_subsystem_api *transport; 835 struct se_subsystem_api *transport;
741 /* Linked list for struct se_hba struct se_device list */ 836 /* Linked list for struct se_hba struct se_device list */
742 struct list_head dev_list; 837 struct list_head dev_list;
743} ____cacheline_aligned; 838};
744 839
745struct se_hba { 840struct se_hba {
746 u16 hba_tpgt; 841 u16 hba_tpgt;
@@ -759,7 +854,7 @@ struct se_hba {
759 struct config_group hba_group; 854 struct config_group hba_group;
760 struct mutex hba_access_mutex; 855 struct mutex hba_access_mutex;
761 struct se_subsystem_api *transport; 856 struct se_subsystem_api *transport;
762} ____cacheline_aligned; 857};
763 858
764struct se_port_stat_grps { 859struct se_port_stat_grps {
765 struct config_group stat_group; 860 struct config_group stat_group;
@@ -785,13 +880,13 @@ struct se_lun {
785 struct se_port *lun_sep; 880 struct se_port *lun_sep;
786 struct config_group lun_group; 881 struct config_group lun_group;
787 struct se_port_stat_grps port_stat_grps; 882 struct se_port_stat_grps port_stat_grps;
788} ____cacheline_aligned; 883};
789 884
790struct scsi_port_stats { 885struct scsi_port_stats {
791 u64 cmd_pdus; 886 u64 cmd_pdus;
792 u64 tx_data_octets; 887 u64 tx_data_octets;
793 u64 rx_data_octets; 888 u64 rx_data_octets;
794} ____cacheline_aligned; 889};
795 890
796struct se_port { 891struct se_port {
797 /* RELATIVE TARGET PORT IDENTIFIER */ 892 /* RELATIVE TARGET PORT IDENTIFIER */
@@ -811,12 +906,12 @@ struct se_port {
811 struct se_portal_group *sep_tpg; 906 struct se_portal_group *sep_tpg;
812 struct list_head sep_alua_list; 907 struct list_head sep_alua_list;
813 struct list_head sep_list; 908 struct list_head sep_list;
814} ____cacheline_aligned; 909};
815 910
816struct se_tpg_np { 911struct se_tpg_np {
817 struct se_portal_group *tpg_np_parent; 912 struct se_portal_group *tpg_np_parent;
818 struct config_group tpg_np_group; 913 struct config_group tpg_np_group;
819} ____cacheline_aligned; 914};
820 915
821struct se_portal_group { 916struct se_portal_group {
822 /* Type of target portal group, see transport_tpg_type_table */ 917 /* Type of target portal group, see transport_tpg_type_table */
@@ -849,13 +944,13 @@ struct se_portal_group {
849 struct config_group tpg_acl_group; 944 struct config_group tpg_acl_group;
850 struct config_group tpg_attrib_group; 945 struct config_group tpg_attrib_group;
851 struct config_group tpg_param_group; 946 struct config_group tpg_param_group;
852} ____cacheline_aligned; 947};
853 948
854struct se_wwn { 949struct se_wwn {
855 struct target_fabric_configfs *wwn_tf; 950 struct target_fabric_configfs *wwn_tf;
856 struct config_group wwn_group; 951 struct config_group wwn_group;
857 struct config_group *wwn_default_groups[2]; 952 struct config_group *wwn_default_groups[2];
858 struct config_group fabric_stat_group; 953 struct config_group fabric_stat_group;
859} ____cacheline_aligned; 954};
860 955
861#endif /* TARGET_CORE_BASE_H */ 956#endif /* TARGET_CORE_BASE_H */
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
deleted file mode 100644
index 2be31ff8763b..000000000000
--- a/include/target/target_core_device.h
+++ /dev/null
@@ -1,63 +0,0 @@
1#ifndef TARGET_CORE_DEVICE_H
2#define TARGET_CORE_DEVICE_H
3
4extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
5extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
6extern struct se_dev_entry *core_get_se_deve_from_rtpi(
7 struct se_node_acl *, u16);
8extern int core_free_device_list_for_node(struct se_node_acl *,
9 struct se_portal_group *);
10extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
11extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
12extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
13 u32, struct se_node_acl *,
14 struct se_portal_group *, int);
15extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
16extern int core_dev_export(struct se_device *, struct se_portal_group *,
17 struct se_lun *);
18extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
19 struct se_lun *);
20extern int target_report_luns(struct se_task *);
21extern void se_release_device_for_hba(struct se_device *);
22extern void se_release_vpd_for_dev(struct se_device *);
23extern void se_clear_dev_ports(struct se_device *);
24extern int se_free_virtual_device(struct se_device *, struct se_hba *);
25extern int se_dev_check_online(struct se_device *);
26extern int se_dev_check_shutdown(struct se_device *);
27extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
28extern int se_dev_set_task_timeout(struct se_device *, u32);
29extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
30extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
31extern int se_dev_set_unmap_granularity(struct se_device *, u32);
32extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
33extern int se_dev_set_emulate_dpo(struct se_device *, int);
34extern int se_dev_set_emulate_fua_write(struct se_device *, int);
35extern int se_dev_set_emulate_fua_read(struct se_device *, int);
36extern int se_dev_set_emulate_write_cache(struct se_device *, int);
37extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
38extern int se_dev_set_emulate_tas(struct se_device *, int);
39extern int se_dev_set_emulate_tpu(struct se_device *, int);
40extern int se_dev_set_emulate_tpws(struct se_device *, int);
41extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
42extern int se_dev_set_is_nonrot(struct se_device *, int);
43extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
44extern int se_dev_set_queue_depth(struct se_device *, u32);
45extern int se_dev_set_max_sectors(struct se_device *, u32);
46extern int se_dev_set_optimal_sectors(struct se_device *, u32);
47extern int se_dev_set_block_size(struct se_device *, u32);
48extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
49 struct se_device *, u32);
50extern int core_dev_del_lun(struct se_portal_group *, u32);
51extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
52extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
53 u32, char *, int *);
54extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
55 struct se_lun_acl *, u32, u32);
56extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
57 struct se_lun *, struct se_lun_acl *);
58extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
59 struct se_lun_acl *lacl);
60extern int core_dev_setup_virtual_lun0(void);
61extern void core_dev_release_virtual_lun0(void);
62
63#endif /* TARGET_CORE_DEVICE_H */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric.h
index 0256825f923d..523e8bc104d4 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric.h
@@ -1,5 +1,5 @@
1/* Defined in target_core_configfs.h */ 1#ifndef TARGET_CORE_FABRIC_H
2struct target_fabric_configfs; 2#define TARGET_CORE_FABRIC_H
3 3
4struct target_core_fabric_ops { 4struct target_core_fabric_ops {
5 struct configfs_subsystem *tf_subsys; 5 struct configfs_subsystem *tf_subsys;
@@ -52,10 +52,6 @@ struct target_core_fabric_ops {
52 * Returning 0 will signal a descriptor has not been released. 52 * Returning 0 will signal a descriptor has not been released.
53 */ 53 */
54 int (*check_stop_free)(struct se_cmd *); 54 int (*check_stop_free)(struct se_cmd *);
55 /*
56 * Optional check for active I/O shutdown
57 */
58 int (*check_release_cmd)(struct se_cmd *);
59 void (*release_cmd)(struct se_cmd *); 55 void (*release_cmd)(struct se_cmd *);
60 /* 56 /*
61 * Called with spin_lock_bh(struct se_portal_group->session_lock) held. 57 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
@@ -103,3 +99,89 @@ struct target_core_fabric_ops {
103 struct config_group *, const char *); 99 struct config_group *, const char *);
104 void (*fabric_drop_nodeacl)(struct se_node_acl *); 100 void (*fabric_drop_nodeacl)(struct se_node_acl *);
105}; 101};
102
103struct se_session *transport_init_session(void);
104void __transport_register_session(struct se_portal_group *,
105 struct se_node_acl *, struct se_session *, void *);
106void transport_register_session(struct se_portal_group *,
107 struct se_node_acl *, struct se_session *, void *);
108void transport_free_session(struct se_session *);
109void transport_deregister_session_configfs(struct se_session *);
110void transport_deregister_session(struct se_session *);
111
112
113void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
114 struct se_session *, u32, int, int, unsigned char *);
115int transport_lookup_cmd_lun(struct se_cmd *, u32);
116int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
117int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
118 unsigned char *, u32, u32, int, int, int);
119int transport_handle_cdb_direct(struct se_cmd *);
120int transport_generic_handle_cdb_map(struct se_cmd *);
121int transport_generic_handle_data(struct se_cmd *);
122int transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
123 struct scatterlist *, u32, struct scatterlist *, u32);
124void transport_do_task_sg_chain(struct se_cmd *);
125int transport_generic_new_cmd(struct se_cmd *);
126
127void transport_generic_process_write(struct se_cmd *);
128
129void transport_generic_free_cmd(struct se_cmd *, int);
130
131bool transport_wait_for_tasks(struct se_cmd *);
132int transport_check_aborted_status(struct se_cmd *, int);
133int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
134
135void target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
136int target_put_sess_cmd(struct se_session *, struct se_cmd *);
137void target_splice_sess_cmd_list(struct se_session *);
138void target_wait_for_sess_cmds(struct se_session *, int);
139
140int core_alua_check_nonop_delay(struct se_cmd *);
141
142struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
143void core_tmr_release_req(struct se_tmr_req *);
144int transport_generic_handle_tmr(struct se_cmd *);
145int transport_lookup_tmr_lun(struct se_cmd *, u32);
146
147struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
148 unsigned char *);
149void core_tpg_clear_object_luns(struct se_portal_group *);
150struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
151 struct se_node_acl *, const char *, u32);
152int core_tpg_del_initiator_node_acl(struct se_portal_group *,
153 struct se_node_acl *, int);
154int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
155 unsigned char *, u32, int);
156int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
157 struct se_portal_group *, void *, int);
158int core_tpg_deregister(struct se_portal_group *);
159
160/* SAS helpers */
161u8 sas_get_fabric_proto_ident(struct se_portal_group *);
162u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
163 struct t10_pr_registration *, int *, unsigned char *);
164u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
165 struct t10_pr_registration *, int *);
166char *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
167 u32 *, char **);
168
169/* FC helpers */
170u8 fc_get_fabric_proto_ident(struct se_portal_group *);
171u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
172 struct t10_pr_registration *, int *, unsigned char *);
173u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
174 struct t10_pr_registration *, int *);
175char *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
176 u32 *, char **);
177
178/* iSCSI helpers */
179u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
180u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
181 struct t10_pr_registration *, int *, unsigned char *);
182u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
183 struct t10_pr_registration *, int *);
184char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
185 u32 *, char **);
186
187#endif /* TARGET_CORE_FABRIC_H */
diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h
deleted file mode 100644
index c2f8d0e3a03b..000000000000
--- a/include/target/target_core_fabric_lib.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef TARGET_CORE_FABRIC_LIB_H
2#define TARGET_CORE_FABRIC_LIB_H
3
4extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
5extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
6 struct t10_pr_registration *, int *, unsigned char *);
7extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
8 struct t10_pr_registration *, int *);
9extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
10 const char *, u32 *, char **);
11
12extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
13extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
14 struct t10_pr_registration *, int *, unsigned char *);
15extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
16 struct t10_pr_registration *, int *);
17extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
18 const char *, u32 *, char **);
19
20extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
21extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
22 struct t10_pr_registration *, int *, unsigned char *);
23extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
24 struct t10_pr_registration *, int *);
25extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
26 const char *, u32 *, char **);
27
28#endif /* TARGET_CORE_FABRIC_LIB_H */
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
deleted file mode 100644
index d5876e17d3fb..000000000000
--- a/include/target/target_core_tmr.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef TARGET_CORE_TMR_H
2#define TARGET_CORE_TMR_H
3
4/* fabric independent task management function values */
5enum tcm_tmreq_table {
6 TMR_ABORT_TASK = 1,
7 TMR_ABORT_TASK_SET = 2,
8 TMR_CLEAR_ACA = 3,
9 TMR_CLEAR_TASK_SET = 4,
10 TMR_LUN_RESET = 5,
11 TMR_TARGET_WARM_RESET = 6,
12 TMR_TARGET_COLD_RESET = 7,
13 TMR_FABRIC_TMR = 255,
14};
15
16/* fabric independent task management response values */
17enum tcm_tmrsp_table {
18 TMR_FUNCTION_COMPLETE = 0,
19 TMR_TASK_DOES_NOT_EXIST = 1,
20 TMR_LUN_DOES_NOT_EXIST = 2,
21 TMR_TASK_STILL_ALLEGIANT = 3,
22 TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
23 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
24 TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
25 TMR_FUNCTION_REJECTED = 255,
26};
27
28extern struct kmem_cache *se_tmr_req_cache;
29
30extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
31extern void core_tmr_release_req(struct se_tmr_req *);
32extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
33 struct list_head *, struct se_cmd *);
34
35#endif /* TARGET_CORE_TMR_H */
diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h
deleted file mode 100644
index 77e18729c4c1..000000000000
--- a/include/target/target_core_tpg.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifndef TARGET_CORE_TPG_H
2#define TARGET_CORE_TPG_H
3
4extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
5 const char *);
6extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
7 unsigned char *);
8extern void core_tpg_add_node_to_devs(struct se_node_acl *,
9 struct se_portal_group *);
10extern struct se_node_acl *core_tpg_check_initiator_node_acl(
11 struct se_portal_group *,
12 unsigned char *);
13extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
14extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
15extern void core_tpg_clear_object_luns(struct se_portal_group *);
16extern struct se_node_acl *core_tpg_add_initiator_node_acl(
17 struct se_portal_group *,
18 struct se_node_acl *,
19 const char *, u32);
20extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
21 struct se_node_acl *, int);
22extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
23 unsigned char *, u32, int);
24extern int core_tpg_register(struct target_core_fabric_ops *,
25 struct se_wwn *,
26 struct se_portal_group *, void *,
27 int);
28extern int core_tpg_deregister(struct se_portal_group *);
29extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
30extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
31 void *);
32extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
33extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
34
35#endif /* TARGET_CORE_TPG_H */
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
deleted file mode 100644
index dac4f2d859fd..000000000000
--- a/include/target/target_core_transport.h
+++ /dev/null
@@ -1,287 +0,0 @@
1#ifndef TARGET_CORE_TRANSPORT_H
2#define TARGET_CORE_TRANSPORT_H
3
4#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
5
6/* Attempts before moving from SHORT to LONG */
7#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
8#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
9#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
10
11#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
12
13#define TRANSPORT_PLUGIN_PHBA_PDEV 1
14#define TRANSPORT_PLUGIN_VHBA_PDEV 2
15#define TRANSPORT_PLUGIN_VHBA_VDEV 3
16
17/*
18 * struct se_subsystem_dev->su_dev_flags
19*/
20#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
21#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
22#define SDF_USING_UDEV_PATH 0x00000004
23#define SDF_USING_ALIAS 0x00000008
24
25/*
26 * struct se_device->dev_flags
27 */
28#define DF_READ_ONLY 0x00000001
29#define DF_SPC2_RESERVATIONS 0x00000002
30#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
31
32/* struct se_dev_attrib sanity values */
33/* Default max_unmap_lba_count */
34#define DA_MAX_UNMAP_LBA_COUNT 0
35/* Default max_unmap_block_desc_count */
36#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
37/* Default unmap_granularity */
38#define DA_UNMAP_GRANULARITY_DEFAULT 0
39/* Default unmap_granularity_alignment */
40#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
41/* Emulation for Direct Page Out */
42#define DA_EMULATE_DPO 0
43/* Emulation for Forced Unit Access WRITEs */
44#define DA_EMULATE_FUA_WRITE 1
45/* Emulation for Forced Unit Access READs */
46#define DA_EMULATE_FUA_READ 0
47/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
48#define DA_EMULATE_WRITE_CACHE 0
49/* Emulation for UNIT ATTENTION Interlock Control */
50#define DA_EMULATE_UA_INTLLCK_CTRL 0
51/* Emulation for TASK_ABORTED status (TAS) by default */
52#define DA_EMULATE_TAS 1
53/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
54#define DA_EMULATE_TPU 0
55/*
56 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
57 * block/blk-lib.c:blkdev_issue_discard()
58 */
59#define DA_EMULATE_TPWS 0
60/* No Emulation for PSCSI by default */
61#define DA_EMULATE_RESERVATIONS 0
62/* No Emulation for PSCSI by default */
63#define DA_EMULATE_ALUA 0
64/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
65#define DA_ENFORCE_PR_ISIDS 1
66#define DA_STATUS_MAX_SECTORS_MIN 16
67#define DA_STATUS_MAX_SECTORS_MAX 8192
68/* By default don't report non-rotating (solid state) medium */
69#define DA_IS_NONROT 0
70/* Queue Algorithm Modifier default for restricted reordering in control mode page */
71#define DA_EMULATE_REST_REORD 0
72
73#define SE_MODE_PAGE_BUF 512
74
75#define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs))
76
77struct se_subsystem_api;
78
79extern int init_se_kmem_caches(void);
80extern void release_se_kmem_caches(void);
81extern u32 scsi_get_new_index(scsi_index_t);
82extern void transport_init_queue_obj(struct se_queue_obj *);
83extern void transport_subsystem_check_init(void);
84extern int transport_subsystem_register(struct se_subsystem_api *);
85extern void transport_subsystem_release(struct se_subsystem_api *);
86extern void transport_load_plugins(void);
87extern struct se_session *transport_init_session(void);
88extern void __transport_register_session(struct se_portal_group *,
89 struct se_node_acl *,
90 struct se_session *, void *);
91extern void transport_register_session(struct se_portal_group *,
92 struct se_node_acl *,
93 struct se_session *, void *);
94extern void transport_free_session(struct se_session *);
95extern void transport_deregister_session_configfs(struct se_session *);
96extern void transport_deregister_session(struct se_session *);
97extern void transport_cmd_finish_abort(struct se_cmd *, int);
98extern void transport_complete_sync_cache(struct se_cmd *, int);
99extern void transport_complete_task(struct se_task *, int);
100extern void transport_add_task_to_execute_queue(struct se_task *,
101 struct se_task *,
102 struct se_device *);
103extern void transport_remove_task_from_execute_queue(struct se_task *,
104 struct se_device *);
105extern void __transport_remove_task_from_execute_queue(struct se_task *,
106 struct se_device *);
107unsigned char *transport_dump_cmd_direction(struct se_cmd *);
108extern void transport_dump_dev_state(struct se_device *, char *, int *);
109extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
110 unsigned long long, char *, int *);
111extern void transport_dump_vpd_proto_id(struct t10_vpd *,
112 unsigned char *, int);
113extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
114extern int transport_dump_vpd_assoc(struct t10_vpd *,
115 unsigned char *, int);
116extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
117extern int transport_dump_vpd_ident_type(struct t10_vpd *,
118 unsigned char *, int);
119extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
120extern int transport_dump_vpd_ident(struct t10_vpd *,
121 unsigned char *, int);
122extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
123extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
124 struct se_subsystem_api *,
125 struct se_subsystem_dev *, u32,
126 void *, struct se_dev_limits *,
127 const char *, const char *);
128extern void transport_init_se_cmd(struct se_cmd *,
129 struct target_core_fabric_ops *,
130 struct se_session *, u32, int, int,
131 unsigned char *);
132void *transport_kmap_first_data_page(struct se_cmd *cmd);
133void transport_kunmap_first_data_page(struct se_cmd *cmd);
134extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
135extern int transport_handle_cdb_direct(struct se_cmd *);
136extern int transport_generic_handle_cdb_map(struct se_cmd *);
137extern int transport_generic_handle_data(struct se_cmd *);
138extern int transport_generic_handle_tmr(struct se_cmd *);
139extern bool target_stop_task(struct se_task *task, unsigned long *flags);
140extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
141 struct scatterlist *, u32);
142extern int transport_clear_lun_from_sessions(struct se_lun *);
143extern bool transport_wait_for_tasks(struct se_cmd *);
144extern int transport_check_aborted_status(struct se_cmd *, int);
145extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
146extern void transport_send_task_abort(struct se_cmd *);
147extern void transport_release_cmd(struct se_cmd *);
148extern void transport_generic_free_cmd(struct se_cmd *, int);
149extern void target_get_sess_cmd(struct se_session *, struct se_cmd *);
150extern int target_put_sess_cmd(struct se_session *, struct se_cmd *);
151extern void target_splice_sess_cmd_list(struct se_session *);
152extern void target_wait_for_sess_cmds(struct se_session *, int);
153extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
154extern void transport_do_task_sg_chain(struct se_cmd *);
155extern void transport_generic_process_write(struct se_cmd *);
156extern int transport_generic_new_cmd(struct se_cmd *);
157extern int transport_generic_do_tmr(struct se_cmd *);
158/* From target_core_alua.c */
159extern int core_alua_check_nonop_delay(struct se_cmd *);
160/* From target_core_cdb.c */
161extern int transport_emulate_control_cdb(struct se_task *);
162extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
163
164/*
165 * Each se_transport_task_t can have N number of possible struct se_task's
166 * for the storage transport(s) to possibly execute.
167 * Used primarily for splitting up CDBs that exceed the physical storage
168 * HBA's maximum sector count per task.
169 */
170struct se_mem {
171 struct page *se_page;
172 u32 se_len;
173 u32 se_off;
174 struct list_head se_list;
175} ____cacheline_aligned;
176
177/*
178 * Each type of disk transport supported MUST have a template defined
179 * within its .h file.
180 */
181struct se_subsystem_api {
182 /*
183 * The Name. :-)
184 */
185 char name[16];
186 /*
187 * Transport Type.
188 */
189 u8 transport_type;
190
191 unsigned int fua_write_emulated : 1;
192 unsigned int write_cache_emulated : 1;
193
194 /*
195 * struct module for struct se_hba references
196 */
197 struct module *owner;
198 /*
199 * Used for global se_subsystem_api list_head
200 */
201 struct list_head sub_api_list;
202 /*
203 * attach_hba():
204 */
205 int (*attach_hba)(struct se_hba *, u32);
206 /*
207 * detach_hba():
208 */
209 void (*detach_hba)(struct se_hba *);
210 /*
211 * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
212 * Linux/SCSI struct Scsi_Host passthrough
213 */
214 int (*pmode_enable_hba)(struct se_hba *, unsigned long);
215 /*
216 * allocate_virtdevice():
217 */
218 void *(*allocate_virtdevice)(struct se_hba *, const char *);
219 /*
220 * create_virtdevice(): Only for Virtual HBAs
221 */
222 struct se_device *(*create_virtdevice)(struct se_hba *,
223 struct se_subsystem_dev *, void *);
224 /*
225 * free_device():
226 */
227 void (*free_device)(void *);
228
229 /*
230 * transport_complete():
231 *
232 * Use transport_generic_complete() for majority of DAS transport
233 * drivers. Provided out of convenience.
234 */
235 int (*transport_complete)(struct se_task *task);
236 struct se_task *(*alloc_task)(unsigned char *cdb);
237 /*
238 * do_task():
239 */
240 int (*do_task)(struct se_task *);
241 /*
242 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
243 * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
244 */
245 int (*do_discard)(struct se_device *, sector_t, u32);
246 /*
247 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
248 * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
249 */
250 void (*do_sync_cache)(struct se_task *);
251 /*
252 * free_task():
253 */
254 void (*free_task)(struct se_task *);
255 /*
256 * check_configfs_dev_params():
257 */
258 ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
259 /*
260 * set_configfs_dev_params():
261 */
262 ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
263 const char *, ssize_t);
264 /*
265 * show_configfs_dev_params():
266 */
267 ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
268 char *);
269 /*
270 * get_device_rev():
271 */
272 u32 (*get_device_rev)(struct se_device *);
273 /*
274 * get_device_type():
275 */
276 u32 (*get_device_type)(struct se_device *);
277 /*
278 * Get the sector_t from a subsystem backstore..
279 */
280 sector_t (*get_blocks)(struct se_device *);
281 /*
282 * get_sense_buffer():
283 */
284 unsigned char *(*get_sense_buffer)(struct se_task *);
285} ____cacheline_aligned;
286
287#endif /* TARGET_CORE_TRANSPORT_H */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 9b7c8ab7d75c..86ee272de210 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -128,7 +128,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
128 128
129 if (S_ISREG(mode)) { 129 if (S_ISREG(mode)) {
130 struct mqueue_inode_info *info; 130 struct mqueue_inode_info *info;
131 struct task_struct *p = current;
132 unsigned long mq_bytes, mq_msg_tblsz; 131 unsigned long mq_bytes, mq_msg_tblsz;
133 132
134 inode->i_fop = &mqueue_file_operations; 133 inode->i_fop = &mqueue_file_operations;
@@ -159,7 +158,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
159 158
160 spin_lock(&mq_lock); 159 spin_lock(&mq_lock);
161 if (u->mq_bytes + mq_bytes < u->mq_bytes || 160 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
162 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) { 161 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
163 spin_unlock(&mq_lock); 162 spin_unlock(&mq_lock);
164 /* mqueue_evict_inode() releases info->messages */ 163 /* mqueue_evict_inode() releases info->messages */
165 ret = -EMFILE; 164 ret = -EMFILE;
diff --git a/ipc/shm.c b/ipc/shm.c
index 02ecf2c078fc..b76be5bda6c2 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
870 case SHM_LOCK: 870 case SHM_LOCK:
871 case SHM_UNLOCK: 871 case SHM_UNLOCK:
872 { 872 {
873 struct file *uninitialized_var(shm_file); 873 struct file *shm_file;
874
875 lru_add_drain_all(); /* drain pagevecs to lru lists */
876 874
877 shp = shm_lock_check(ns, shmid); 875 shp = shm_lock_check(ns, shmid);
878 if (IS_ERR(shp)) { 876 if (IS_ERR(shp)) {
@@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
895 err = security_shm_shmctl(shp, cmd); 893 err = security_shm_shmctl(shp, cmd);
896 if (err) 894 if (err)
897 goto out_unlock; 895 goto out_unlock;
898 896
899 if(cmd==SHM_LOCK) { 897 shm_file = shp->shm_file;
898 if (is_file_hugepages(shm_file))
899 goto out_unlock;
900
901 if (cmd == SHM_LOCK) {
900 struct user_struct *user = current_user(); 902 struct user_struct *user = current_user();
901 if (!is_file_hugepages(shp->shm_file)) { 903 err = shmem_lock(shm_file, 1, user);
902 err = shmem_lock(shp->shm_file, 1, user); 904 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
903 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){ 905 shp->shm_perm.mode |= SHM_LOCKED;
904 shp->shm_perm.mode |= SHM_LOCKED; 906 shp->mlock_user = user;
905 shp->mlock_user = user;
906 }
907 } 907 }
908 } else if (!is_file_hugepages(shp->shm_file)) { 908 goto out_unlock;
909 shmem_lock(shp->shm_file, 0, shp->mlock_user);
910 shp->shm_perm.mode &= ~SHM_LOCKED;
911 shp->mlock_user = NULL;
912 } 909 }
910
911 /* SHM_UNLOCK */
912 if (!(shp->shm_perm.mode & SHM_LOCKED))
913 goto out_unlock;
914 shmem_lock(shm_file, 0, shp->mlock_user);
915 shp->shm_perm.mode &= ~SHM_LOCKED;
916 shp->mlock_user = NULL;
917 get_file(shm_file);
913 shm_unlock(shp); 918 shm_unlock(shp);
919 shmem_unlock_mapping(shm_file->f_mapping);
920 fput(shm_file);
914 goto out; 921 goto out;
915 } 922 }
916 case IPC_RMID: 923 case IPC_RMID:
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index caaea6e944f8..af1de0f34eae 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1863,11 +1863,12 @@ void __audit_syscall_entry(int arch, int major,
1863 1863
1864/** 1864/**
1865 * audit_syscall_exit - deallocate audit context after a system call 1865 * audit_syscall_exit - deallocate audit context after a system call
1866 * @pt_regs: syscall registers 1866 * @success: success value of the syscall
1867 * @return_code: return value of the syscall
1867 * 1868 *
1868 * Tear down after system call. If the audit context has been marked as 1869 * Tear down after system call. If the audit context has been marked as
1869 * auditable (either because of the AUDIT_RECORD_CONTEXT state from 1870 * auditable (either because of the AUDIT_RECORD_CONTEXT state from
1870 * filtering, or because some other part of the kernel write an audit 1871 * filtering, or because some other part of the kernel wrote an audit
1871 * message), then write out the syscall information. In all cases, 1872 * message), then write out the syscall information. In all cases,
1872 * free the names stored from getname(). 1873 * free the names stored from getname().
1873 */ 1874 */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 95dd7212e610..29f5b65bee29 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1077 /* Early boot. kretprobe_table_locks not yet initialized. */ 1077 /* Early boot. kretprobe_table_locks not yet initialized. */
1078 return; 1078 return;
1079 1079
1080 INIT_HLIST_HEAD(&empty_rp);
1080 hash = hash_ptr(tk, KPROBE_HASH_BITS); 1081 hash = hash_ptr(tk, KPROBE_HASH_BITS);
1081 head = &kretprobe_inst_table[hash]; 1082 head = &kretprobe_inst_table[hash];
1082 kretprobe_table_lock(hash, &flags); 1083 kretprobe_table_lock(hash, &flags);
@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
1085 recycle_rp_inst(ri, &empty_rp); 1086 recycle_rp_inst(ri, &empty_rp);
1086 } 1087 }
1087 kretprobe_table_unlock(hash, &flags); 1088 kretprobe_table_unlock(hash, &flags);
1088 INIT_HLIST_HEAD(&empty_rp);
1089 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 1089 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
1090 hlist_del(&ri->hlist); 1090 hlist_del(&ri->hlist);
1091 kfree(ri); 1091 kfree(ri);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1cf88900ec4f..6a768e537001 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -812,7 +812,8 @@ unsigned int snapshot_additional_pages(struct zone *zone)
812 unsigned int res; 812 unsigned int res;
813 813
814 res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); 814 res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
815 res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE); 815 res += DIV_ROUND_UP(res * sizeof(struct bm_block),
816 LINKED_PAGE_DATA_SIZE);
816 return 2 * res; 817 return 2 * res;
817} 818}
818 819
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index b0d798eaf130..d72586fdf660 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -129,7 +129,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
129 * cpupri_set - update the cpu priority setting 129 * cpupri_set - update the cpu priority setting
130 * @cp: The cpupri context 130 * @cp: The cpupri context
131 * @cpu: The target cpu 131 * @cpu: The target cpu
132 * @pri: The priority (INVALID-RT99) to assign to this CPU 132 * @newpri: The priority (INVALID-RT99) to assign to this CPU
133 * 133 *
134 * Note: Assumes cpu_rq(cpu)->lock is locked 134 * Note: Assumes cpu_rq(cpu)->lock is locked
135 * 135 *
@@ -200,7 +200,6 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
200/** 200/**
201 * cpupri_init - initialize the cpupri structure 201 * cpupri_init - initialize the cpupri structure
202 * @cp: The cpupri context 202 * @cp: The cpupri context
203 * @bootmem: true if allocations need to use bootmem
204 * 203 *
205 * Returns: -ENOMEM if memory fails. 204 * Returns: -ENOMEM if memory fails.
206 */ 205 */
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index db110b8ae030..f1539decd99d 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
634 int ret = 0; 634 int ret = 0;
635 635
636 /* 636 /*
637 * We skip modules that tain the kernel, especially those with different 637 * We skip modules that taint the kernel, especially those with different
638 * module header (for forced load), to make sure we don't cause a crash. 638 * module headers (for forced load), to make sure we don't cause a crash.
639 * Staging and out-of-tree GPL modules are fine.
639 */ 640 */
640 if (mod->taints) 641 if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
641 return 0; 642 return 0;
642 mutex_lock(&tracepoints_mutex); 643 mutex_lock(&tracepoints_mutex);
643 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); 644 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index fe84bb978e3b..716802b774ea 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -255,6 +255,8 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
255 if (!n) 255 if (!n)
256 n++; /* avoid zero length allocation */ 256 n++; /* avoid zero length allocation */
257 p = buffer = kmalloc(n, GFP_KERNEL); 257 p = buffer = kmalloc(n, GFP_KERNEL);
258 if (!p)
259 return NULL;
258 260
259 for (i = a->nlimbs - 1; i >= 0; i--) { 261 for (i = a->nlimbs - 1; i >= 0; i--) {
260 alimb = a->d[i]; 262 alimb = a->d[i];
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea8c3a4cd2ae..5f34bd8dda34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2508{ 2508{
2509 struct hstate *h = hstate_vma(vma); 2509 struct hstate *h = hstate_vma(vma);
2510 int ret = VM_FAULT_SIGBUS; 2510 int ret = VM_FAULT_SIGBUS;
2511 int anon_rmap = 0;
2511 pgoff_t idx; 2512 pgoff_t idx;
2512 unsigned long size; 2513 unsigned long size;
2513 struct page *page; 2514 struct page *page;
@@ -2562,14 +2563,13 @@ retry:
2562 spin_lock(&inode->i_lock); 2563 spin_lock(&inode->i_lock);
2563 inode->i_blocks += blocks_per_huge_page(h); 2564 inode->i_blocks += blocks_per_huge_page(h);
2564 spin_unlock(&inode->i_lock); 2565 spin_unlock(&inode->i_lock);
2565 page_dup_rmap(page);
2566 } else { 2566 } else {
2567 lock_page(page); 2567 lock_page(page);
2568 if (unlikely(anon_vma_prepare(vma))) { 2568 if (unlikely(anon_vma_prepare(vma))) {
2569 ret = VM_FAULT_OOM; 2569 ret = VM_FAULT_OOM;
2570 goto backout_unlocked; 2570 goto backout_unlocked;
2571 } 2571 }
2572 hugepage_add_new_anon_rmap(page, vma, address); 2572 anon_rmap = 1;
2573 } 2573 }
2574 } else { 2574 } else {
2575 /* 2575 /*
@@ -2582,7 +2582,6 @@ retry:
2582 VM_FAULT_SET_HINDEX(h - hstates); 2582 VM_FAULT_SET_HINDEX(h - hstates);
2583 goto backout_unlocked; 2583 goto backout_unlocked;
2584 } 2584 }
2585 page_dup_rmap(page);
2586 } 2585 }
2587 2586
2588 /* 2587 /*
@@ -2606,6 +2605,10 @@ retry:
2606 if (!huge_pte_none(huge_ptep_get(ptep))) 2605 if (!huge_pte_none(huge_ptep_get(ptep)))
2607 goto backout; 2606 goto backout;
2608 2607
2608 if (anon_rmap)
2609 hugepage_add_new_anon_rmap(page, vma, address);
2610 else
2611 page_dup_rmap(page);
2609 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 2612 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2610 && (vma->vm_flags & VM_SHARED))); 2613 && (vma->vm_flags & VM_SHARED)));
2611 set_huge_pte_at(mm, address, ptep, new_pte); 2614 set_huge_pte_at(mm, address, ptep, new_pte);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c3688dfd9a5f..556859fec4ef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3247,7 +3247,7 @@ int mem_cgroup_prepare_migration(struct page *page,
3247 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; 3247 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3248 else 3248 else
3249 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3249 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3250 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype); 3250 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
3251 return ret; 3251 return ret;
3252} 3252}
3253 3253
diff --git a/mm/memory.c b/mm/memory.c
index 5e30583c2605..fa2f04e0337c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -878,15 +878,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			}
 			if (likely(!non_swap_entry(entry)))
 				rss[MM_SWAPENTS]++;
-			else if (is_write_migration_entry(entry) &&
-					is_cow_mapping(vm_flags)) {
-				/*
-				 * COW mappings require pages in both parent
-				 * and child to be set to read.
-				 */
-				make_migration_entry_read(&entry);
-				pte = swp_entry_to_pte(entry);
-				set_pte_at(src_mm, addr, src_pte, pte);
+			else if (is_migration_entry(entry)) {
+				page = migration_entry_to_page(entry);
+
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]++;
+				else
+					rss[MM_FILEPAGES]++;
+
+				if (is_write_migration_entry(entry) &&
+				    is_cow_mapping(vm_flags)) {
+					/*
+					 * COW mappings require pages in both
+					 * parent and child to be set to read.
+					 */
+					make_migration_entry_read(&entry);
+					pte = swp_entry_to_pte(entry);
+					set_pte_at(src_mm, addr, src_pte, pte);
+				}
 			}
 		}
 		goto out_set_pte;
@@ -1191,6 +1200,16 @@ again:
 
 			if (!non_swap_entry(entry))
 				rss[MM_SWAPENTS]--;
+			else if (is_migration_entry(entry)) {
+				struct page *page;
+
+				page = migration_entry_to_page(entry);
+
+				if (PageAnon(page))
+					rss[MM_ANONPAGES]--;
+				else
+					rss[MM_FILEPAGES]--;
+			}
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0027d8f4a1bb..d2186ecb36f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5413,7 +5413,25 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 
 bool is_pageblock_removable_nolock(struct page *page)
 {
-	struct zone *zone = page_zone(page);
+	struct zone *zone;
+	unsigned long pfn;
+
+	/*
+	 * We have to be careful here because we are iterating over memory
+	 * sections which are not zone aware so we might end up outside of
+	 * the zone but still within the section.
+	 * We have to take care about the node as well. If the node is offline
+	 * its NODE_DATA will be NULL - see page_zone.
+	 */
+	if (!node_online(page_to_nid(page)))
+		return false;
+
+	zone = page_zone(page);
+	pfn = page_to_pfn(page);
+	if (zone->zone_start_pfn > pfn ||
+			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+		return false;
+
 	return __count_immobile_pages(zone, page, 0);
 }
 
diff --git a/mm/shmem.c b/mm/shmem.c
index feead1943d92..269d049294ab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
 	int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
 			pvec->pages[j++] = page;
 	}
 	pvec->nr = j;
-	pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+	struct pagevec pvec;
+	pgoff_t indices[PAGEVEC_SIZE];
+	pgoff_t index = 0;
+
+	pagevec_init(&pvec, 0);
+	/*
+	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+	 */
+	while (!mapping_unevictable(mapping)) {
+		/*
+		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+		 */
+		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+					PAGEVEC_SIZE, pvec.pages, indices);
+		if (!pvec.nr)
+			break;
+		index = indices[pvec.nr - 1] + 1;
+		shmem_deswap_pagevec(&pvec);
+		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		pagevec_release(&pvec);
+		cond_resched();
+	}
 }
 
 /*
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
 		index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			continue;
 		}
 		if (index == start && indices[0] > end) {
-			shmem_pagevec_release(&pvec);
+			shmem_deswap_pagevec(&pvec);
+			pagevec_release(&pvec);
 			break;
 		}
 		mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 			}
 			unlock_page(page);
 		}
-		shmem_pagevec_release(&pvec);
+		shmem_deswap_pagevec(&pvec);
+		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		index++;
 	}
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
-		/*
-		 * Ensure that a racing putback_lru_page() can see
-		 * the pages of this mapping are evictable when we
-		 * skip them due to !PageLRU during the scan.
-		 */
-		smp_mb__after_clear_bit();
-		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
 
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2880396f7953..c52b23552659 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>	/* for try_to_release_page(),
 					buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@ redo:
 		 * When racing with an mlock or AS_UNEVICTABLE clearing
 		 * (page is unlocked) make sure that if the other thread
 		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_page,
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
 		 * the page back to the evictable list.
 		 *
@@ -3499,100 +3498,61 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages: array of pages to check
+ * @nr_pages: number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
  *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
 	struct lruvec *lruvec;
+	struct zone *zone = NULL;
+	int pgscanned = 0;
+	int pgrescued = 0;
+	int i;
 
-	VM_BUG_ON(PageActive(page));
-retry:
-	ClearPageUnevictable(page);
-	if (page_evictable(page, NULL)) {
-		enum lru_list l = page_lru_base_type(page);
-
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, l);
-		list_move(&page->lru, &lruvec->lists[l]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
-		__count_vm_event(UNEVICTABLE_PGRESCUED);
-	} else {
-		/*
-		 * rotate unevictable list
-		 */
-		SetPageUnevictable(page);
-		lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-						   LRU_UNEVICTABLE);
-		list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-		if (page_evictable(page, NULL))
-			goto retry;
-	}
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping.  Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-	pgoff_t next = 0;
-	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-			 PAGE_CACHE_SHIFT;
-	struct zone *zone;
-	struct pagevec pvec;
-
-	if (mapping->nrpages == 0)
-		return;
-
-	pagevec_init(&pvec, 0);
-	while (next < end &&
-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-		int i;
-		int pg_scanned = 0;
-
-		zone = NULL;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-			struct zone *pagezone = page_zone(page);
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+		struct zone *pagezone;
 
-			pg_scanned++;
-			if (page_index > next)
-				next = page_index;
-			next++;
+		pgscanned++;
+		pagezone = page_zone(page);
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
 
-			if (pagezone != zone) {
-				if (zone)
-					spin_unlock_irq(&zone->lru_lock);
-				zone = pagezone;
-				spin_lock_irq(&zone->lru_lock);
-			}
+		if (!PageLRU(page) || !PageUnevictable(page))
+			continue;
 
-			if (PageLRU(page) && PageUnevictable(page))
-				check_move_unevictable_page(page, zone);
+		if (page_evictable(page, NULL)) {
+			enum lru_list lru = page_lru_base_type(page);
+
+			VM_BUG_ON(PageActive(page));
+			ClearPageUnevictable(page);
+			__dec_zone_state(zone, NR_UNEVICTABLE);
+			lruvec = mem_cgroup_lru_move_lists(zone, page,
+						LRU_UNEVICTABLE, lru);
+			list_move(&page->lru, &lruvec->lists[lru]);
+			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			pgrescued++;
 		}
-		if (zone)
-			spin_unlock_irq(&zone->lru_lock);
-		pagevec_release(&pvec);
-
-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
 	}
 
+	if (zone) {
+		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+		spin_unlock_irq(&zone->lru_lock);
+	}
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index d793001929cf..9b0c0b8b4ab4 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -5,7 +5,7 @@ use strict;
 ## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ##
 ## Copyright (C) 2000, 1 Tim Waugh <twaugh@redhat.com> ##
 ## Copyright (C) 2001 Simon Huggins ##
-## Copyright (C) 2005-2010 Randy Dunlap ##
+## Copyright (C) 2005-2012 Randy Dunlap ##
 ## ##
 ## #define enhancements by Armin Kuster <akuster@mvista.com> ##
 ## Copyright (c) 2000 MontaVista Software, Inc. ##
@@ -1785,6 +1785,7 @@ sub dump_function($$) {
     $prototype =~ s/__devinit +//;
     $prototype =~ s/__init +//;
     $prototype =~ s/__init_or_module +//;
+    $prototype =~ s/__must_check +//;
     $prototype =~ s/^#\s*define\s+//; #ak added
     $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
 
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index d661afbe474c..d45061d02fee 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -99,6 +99,7 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
 			    struct inode *inode, enum ima_hooks func, int mask)
 {
 	struct task_struct *tsk = current;
+	const struct cred *cred = current_cred();
 	int i;
 
 	if ((rule->flags & IMA_FUNC) && rule->func != func)
@@ -108,7 +109,7 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
 	if ((rule->flags & IMA_FSMAGIC)
 	    && rule->fsmagic != inode->i_sb->s_magic)
 		return false;
-	if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
+	if ((rule->flags & IMA_UID) && rule->uid != cred->uid)
 		return false;
 	for (i = 0; i < MAX_LSM_RULES; i++) {
 		int rc = 0;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index c7a7caec4830..65647f825584 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -33,6 +33,7 @@
 
 extern struct key_type key_type_dead;
 extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
 
 /*****************************************************************************/
 /*
diff --git a/security/keys/key.c b/security/keys/key.c
index 4f64c7267afb..7ada8019be1f 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -999,6 +999,7 @@ void __init key_init(void)
 	list_add_tail(&key_type_keyring.link, &key_types_list);
 	list_add_tail(&key_type_dead.link, &key_types_list);
 	list_add_tail(&key_type_user.link, &key_types_list);
+	list_add_tail(&key_type_logon.link, &key_types_list);
 
 	/* record the root user tracking */
 	rb_link_node(&root_key_user.node,
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 69ff52c08e97..c7660a25a3e4 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -18,6 +18,8 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
+static int logon_vet_description(const char *desc);
+
 /*
  * user defined keys take an arbitrary string as the description and an
  * arbitrary blob of data as the payload
@@ -36,6 +38,24 @@ struct key_type key_type_user = {
 EXPORT_SYMBOL_GPL(key_type_user);
 
 /*
+ * This key type is essentially the same as key_type_user, but it does
+ * not define a .read op. This is suitable for storing username and
+ * password pairs in the keyring that you do not want to be readable
+ * from userspace.
+ */
+struct key_type key_type_logon = {
+	.name			= "logon",
+	.instantiate		= user_instantiate,
+	.update			= user_update,
+	.match			= user_match,
+	.revoke			= user_revoke,
+	.destroy		= user_destroy,
+	.describe		= user_describe,
+	.vet_description	= logon_vet_description,
+};
+EXPORT_SYMBOL_GPL(key_type_logon);
+
+/*
  * instantiate a user defined key
  */
 int user_instantiate(struct key *key, const void *data, size_t datalen)
@@ -59,7 +79,7 @@ int user_instantiate(struct key *key, const void *data, size_t datalen)
 	/* attach the data */
 	upayload->datalen = datalen;
 	memcpy(upayload->data, data, datalen);
-	rcu_assign_pointer(key->payload.data, upayload);
+	rcu_assign_keypointer(key, upayload);
 	ret = 0;
 
 error:
@@ -98,7 +118,7 @@ int user_update(struct key *key, const void *data, size_t datalen)
 	if (ret == 0) {
 		/* attach the new data, displacing the old */
 		zap = key->payload.data;
-		rcu_assign_pointer(key->payload.data, upayload);
+		rcu_assign_keypointer(key, upayload);
 		key->expiry = 0;
 	}
 
@@ -133,7 +153,7 @@ void user_revoke(struct key *key)
 	key_payload_reserve(key, 0);
 
 	if (upayload) {
-		rcu_assign_pointer(key->payload.data, NULL);
+		rcu_assign_keypointer(key, NULL);
 		kfree_rcu(upayload, rcu);
 	}
 }
@@ -189,3 +209,20 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
 }
 
 EXPORT_SYMBOL_GPL(user_read);
+
+/* Vet the description for a "logon" key */
+static int logon_vet_description(const char *desc)
+{
+	char *p;
+
+	/* require a "qualified" description string */
+	p = strchr(desc, ':');
+	if (!p)
+		return -EINVAL;
+
+	/* also reject description with ':' as first char */
+	if (p == desc)
+		return -EINVAL;
+
+	return 0;
+}
diff --git a/sound/pci/hda/alc880_quirks.c b/sound/pci/hda/alc880_quirks.c
index 5b68435d195b..501501ef36a9 100644
--- a/sound/pci/hda/alc880_quirks.c
+++ b/sound/pci/hda/alc880_quirks.c
@@ -762,16 +762,22 @@ static void alc880_uniwill_unsol_event(struct hda_codec *codec,
 	/* Looks like the unsol event is incompatible with the standard
 	 * definition.  4bit tag is placed at 28 bit!
 	 */
-	switch (res >> 28) {
+	res >>= 28;
+	switch (res) {
 	case ALC_MIC_EVENT:
 		alc88x_simple_mic_automute(codec);
 		break;
 	default:
-		alc_sku_unsol_event(codec, res);
+		alc_exec_unsol_event(codec, res);
 		break;
 	}
 }
 
+static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+	alc_exec_unsol_event(codec, res >> 28);
+}
+
 static void alc880_uniwill_p53_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -800,10 +806,11 @@ static void alc880_uniwill_p53_unsol_event(struct hda_codec *codec,
 	/* Looks like the unsol event is incompatible with the standard
 	 * definition.  4bit tag is placed at 28 bit!
 	 */
-	if ((res >> 28) == ALC_DCVOL_EVENT)
+	res >>= 28;
+	if (res == ALC_DCVOL_EVENT)
 		alc880_uniwill_p53_dcvol_automute(codec);
 	else
-		alc_sku_unsol_event(codec, res);
+		alc_exec_unsol_event(codec, res);
 }
 
 /*
@@ -1677,7 +1684,7 @@ static const struct alc_config_preset alc880_presets[] = {
 		.channel_mode = alc880_lg_ch_modes,
 		.need_dac_fix = 1,
 		.input_mux = &alc880_lg_capture_source,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc880_unsol_event,
 		.setup = alc880_lg_setup,
 		.init_hook = alc_hp_automute,
 #ifdef CONFIG_SND_HDA_POWER_SAVE
diff --git a/sound/pci/hda/alc882_quirks.c b/sound/pci/hda/alc882_quirks.c
index bdf0ed4ab3e2..bb364a53f546 100644
--- a/sound/pci/hda/alc882_quirks.c
+++ b/sound/pci/hda/alc882_quirks.c
@@ -730,6 +730,11 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
 		alc889A_mb31_automute(codec);
 }
 
+static void alc882_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+	alc_exec_unsol_event(codec, res >> 26);
+}
+
 /*
  * configuration and preset
  */
@@ -775,7 +780,7 @@ static const struct alc_config_preset alc882_presets[] = {
 		.channel_mode = alc885_mba21_ch_modes,
 		.num_channel_mode = ARRAY_SIZE(alc885_mba21_ch_modes),
 		.input_mux = &alc882_capture_source,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_mba21_setup,
 		.init_hook = alc_hp_automute,
 	},
@@ -791,7 +796,7 @@ static const struct alc_config_preset alc882_presets[] = {
 		.input_mux = &alc882_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_mbp3_setup,
 		.init_hook = alc_hp_automute,
 	},
@@ -806,7 +811,7 @@ static const struct alc_config_preset alc882_presets[] = {
 		.input_mux = &mb5_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_mb5_setup,
 		.init_hook = alc_hp_automute,
 	},
@@ -821,7 +826,7 @@ static const struct alc_config_preset alc882_presets[] = {
 		.input_mux = &macmini3_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_macmini3_setup,
 		.init_hook = alc_hp_automute,
 	},
@@ -836,7 +841,7 @@ static const struct alc_config_preset alc882_presets[] = {
 		.input_mux = &alc889A_imac91_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
-		.unsol_event = alc_sku_unsol_event,
+		.unsol_event = alc882_unsol_event,
 		.setup = alc885_imac91_setup,
 		.init_hook = alc_hp_automute,
 	},
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fb35474c1203..95dfb6874941 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -469,6 +469,7 @@ struct azx {
 	unsigned int irq_pending_warned :1;
 	unsigned int probing :1; /* codec probing phase */
 	unsigned int snoop:1;
+	unsigned int align_buffer_size:1;
 
 	/* for debugging */
 	unsigned int last_cmd[AZX_MAX_CODECS];
@@ -1690,7 +1691,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
 	runtime->hw.rates = hinfo->rates;
 	snd_pcm_limit_hw_rates(runtime);
 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-	if (align_buffer_size)
+	if (chip->align_buffer_size)
 		/* constrain buffer sizes to be multiple of 128
 		   bytes. This is more efficient in terms of memory
 		   access but isn't required by the HDA spec and
@@ -2773,8 +2774,9 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
 	}
 
 	/* disable buffer size rounding to 128-byte multiples if supported */
+	chip->align_buffer_size = align_buffer_size;
 	if (chip->driver_caps & AZX_DCAPS_BUFSIZE)
-		align_buffer_size = 0;
+		chip->align_buffer_size = 0;
 
 	/* allow 64bit DMA address if supported by H/W */
 	if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8a32a69c83c3..a7a5733aa4d2 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3027,7 +3027,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
-	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
 	SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5e82acf77c5a..c95c8bde12d0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -185,7 +185,6 @@ struct alc_spec {
 	unsigned int vol_in_capsrc:1; /* use capsrc volume (ADC has no vol) */
 	unsigned int parse_flags; /* passed to snd_hda_parse_pin_defcfg() */
 	unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */
-	unsigned int use_jack_tbl:1; /* 1 for model=auto */
 
 	/* auto-mute control */
 	int automute_mode;
@@ -621,17 +620,10 @@ static void alc_mic_automute(struct hda_codec *codec)
 		alc_mux_select(codec, 0, spec->int_mic_idx, false);
 }
 
-/* unsolicited event for HP jack sensing */
-static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
+/* handle the specified unsol action (ALC_XXX_EVENT) */
+static void alc_exec_unsol_event(struct hda_codec *codec, int action)
 {
-	struct alc_spec *spec = codec->spec;
-	if (codec->vendor_id == 0x10ec0880)
-		res >>= 28;
-	else
-		res >>= 26;
-	if (spec->use_jack_tbl)
-		res = snd_hda_jack_get_action(codec, res);
-	switch (res) {
+	switch (action) {
 	case ALC_HP_EVENT:
 		alc_hp_automute(codec);
 		break;
@@ -645,6 +637,17 @@ static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
 	snd_hda_jack_report_sync(codec);
 }
 
+/* unsolicited event for HP jack sensing */
+static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+	if (codec->vendor_id == 0x10ec0880)
+		res >>= 28;
+	else
+		res >>= 26;
+	res = snd_hda_jack_get_action(codec, res);
+	alc_exec_unsol_event(codec, res);
+}
+
 /* call init functions of standard auto-mute helpers */
 static void alc_inithook(struct hda_codec *codec)
 {
@@ -1883,7 +1886,7 @@ static const struct snd_kcontrol_new alc_beep_mixer[] = {
 };
 #endif
 
-static int alc_build_controls(struct hda_codec *codec)
+static int __alc_build_controls(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 	struct snd_kcontrol *kctl = NULL;
@@ -2029,11 +2032,16 @@ static int alc_build_controls(struct hda_codec *codec)
 
 	alc_free_kctls(codec); /* no longer needed */
 
-	err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
+	return 0;
+}
+
+static int alc_build_controls(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	int err = __alc_build_controls(codec);
 	if (err < 0)
 		return err;
-
-	return 0;
+	return snd_hda_jack_add_kctls(codec, &spec->autocfg);
 }
 
 
@@ -3233,7 +3241,7 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
 	int i, err, noutputs;
 
 	noutputs = cfg->line_outs;
-	if (spec->multi_ios > 0)
+	if (spec->multi_ios > 0 && cfg->line_outs < 3)
 		noutputs += spec->multi_ios;
 
 	for (i = 0; i < noutputs; i++) {
@@ -3904,7 +3912,6 @@ static void set_capture_mixer(struct hda_codec *codec)
 static void alc_auto_init_std(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
-	spec->use_jack_tbl = 1;
 	alc_auto_init_multi_out(codec);
 	alc_auto_init_extra_out(codec);
 	alc_auto_init_analog_input(codec);
@@ -4168,6 +4175,8 @@ static int patch_alc880(struct hda_codec *codec)
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC_MODEL_AUTO)
 		spec->init_hook = alc_auto_init_std;
+	else
+		codec->patch_ops.build_controls = __alc_build_controls;
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	if (!spec->loopback.amplist)
 		spec->loopback.amplist = alc880_loopbacks;
@@ -4297,6 +4306,8 @@ static int patch_alc260(struct hda_codec *codec)
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC_MODEL_AUTO)
 		spec->init_hook = alc_auto_init_std;
+	else
+		codec->patch_ops.build_controls = __alc_build_controls;
 	spec->shutup = alc_eapd_shutup;
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	if (!spec->loopback.amplist)
@@ -4691,6 +4702,8 @@ static int patch_alc882(struct hda_codec *codec)
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC_MODEL_AUTO)
 		spec->init_hook = alc_auto_init_std;
+	else
+		codec->patch_ops.build_controls = __alc_build_controls;
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	if (!spec->loopback.amplist)
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 3556408d6ece..336cfcd324f9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1608,7 +1608,7 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
 		      "Alienware M17x", STAC_ALIENWARE_M17X),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
-		      "Alienware M17x", STAC_ALIENWARE_M17X),
+		      "Alienware M17x R3", STAC_DELL_EQ),
 	{} /* terminator */
 };
 
@@ -4163,13 +4163,15 @@ static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
 	return 1;
 }
 
-static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
+static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
 {
 	int i;
 	for (i = 0; i < cfg->hp_outs; i++)
 		if (cfg->hp_pins[i] == nid)
 			return 1; /* nid is a HP-Out */
-
+	for (i = 0; i < cfg->line_outs; i++)
+		if (cfg->line_out_pins[i] == nid)
+			return 1; /* nid is a line-Out */
 	return 0; /* nid is not a HP-Out */
 };
 
@@ -4375,7 +4377,7 @@ static int stac92xx_init(struct hda_codec *codec)
 			continue;
 		}
 
-		if (is_nid_hp_pin(cfg, nid))
+		if (is_nid_out_jack_pin(cfg, nid))
 			continue; /* already has an unsol event */
 
 		pinctl = snd_hda_codec_read(codec, nid, 0,
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d7bd91831611..f8863ebb4304 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1457,5 +1457,5 @@ static void __exit sgtl5000_exit(void)
 module_exit(sgtl5000_exit);
 
 MODULE_DESCRIPTION("Freescale SGTL5000 ALSA SoC Codec Driver");
-MODULE_AUTHOR("Zeng Zhaoming <zhaoming.zeng@freescale.com>");
+MODULE_AUTHOR("Zeng Zhaoming <zengzm.kernel@gmail.com>");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 2b40c93601ed..7c7fd925db8d 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -444,6 +444,12 @@ static int _wm8993_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
 	/* Enable the FLL */
 	snd_soc_write(codec, WM8993_FLL_CONTROL_1, reg1 | WM8993_FLL_ENA);
 
+	/* Both overestimates */
+	if (Fref < 1000000)
+		msleep(3);
+	else
+		msleep(1);
+
 	dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
 
 	wm8993->fll_fref = Fref;
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index aecdba9f65a1..5780c9b9d569 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -88,11 +88,13 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream,
 	iprtd->dma_data.dma_request = dma_params->dma;
 
 	/* Try to grab a DMA channel */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
-	if (!iprtd->dma_chan)
-		return -EINVAL;
+	if (!iprtd->dma_chan) {
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
+		if (!iprtd->dma_chan)
+			return -EINVAL;
+	}
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3986520b4677..b5ecf6d23214 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -907,6 +907,10 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
 			if (err < 0)
 				printk(KERN_ERR "asoc: failed to remove %s\n", platform->name);
 		}
+
+		/* Make sure all DAPM widgets are freed */
+		snd_soc_dapm_free(&platform->dapm);
+
 		platform->probed = 0;
 		list_del(&platform->card_list);
 		module_put(platform->dev->driver->owner);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 3ad1f59b8028..1f55ded4047f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1426,7 +1426,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
 		dapm->target_bias_level = SND_SOC_BIAS_ON;
 		break;
 	case SND_SOC_DAPM_STREAM_STOP:
-		if (dapm->codec->active)
+		if (dapm->codec && dapm->codec->active)
 			dapm->target_bias_level = SND_SOC_BIAS_ON;
 		else
 			dapm->target_bias_level = SND_SOC_BIAS_STANDBY;
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 3c6f7808efae..310d3dd5e547 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -811,6 +811,8 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
 	case 0x2C:	/* Westmere EP - Gulftown */
 	case 0x2A:	/* SNB */
 	case 0x2D:	/* SNB Xeon */
+	case 0x3A:	/* IVB */
+	case 0x3D:	/* IVB Xeon */
 		return 1;
 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */