aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/mips/ralink.txt17
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Makefile4
-rw-r--r--arch/arc/Kconfig4
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/cache.h3
-rw-r--r--arch/arc/include/asm/cacheflush.h58
-rw-r--r--arch/arc/include/asm/page.h16
-rw-r--r--arch/arc/include/asm/pgtable.h3
-rw-r--r--arch/arc/include/asm/shmparam.h18
-rw-r--r--arch/arc/include/asm/tlb.h11
-rw-r--r--arch/arc/mm/Makefile2
-rw-r--r--arch/arc/mm/cache_arc700.c221
-rw-r--r--arch/arc/mm/mmap.c78
-rw-r--r--arch/arc/mm/tlb.c29
-rw-r--r--arch/arc/plat-tb10x/Kconfig7
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/kernel/sys32.S7
-rw-r--r--arch/blackfin/Makefile6
-rw-r--r--arch/blackfin/boot/Makefile16
-rw-r--r--arch/blackfin/include/asm/atomic.h2
-rw-r--r--arch/blackfin/include/asm/bfin_sdh.h31
-rw-r--r--arch/blackfin/include/asm/bitops.h1
-rw-r--r--arch/blackfin/include/asm/def_LPBlackfin.h2
-rw-r--r--arch/blackfin/include/asm/mem_init.h9
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c16
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbmgr.c27
-rw-r--r--arch/blackfin/kernel/cplbinfo.c9
-rw-r--r--arch/blackfin/kernel/setup.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c1
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c1
-rw-r--r--arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h2
-rw-r--r--arch/m68k/Kconfig.cpu12
-rw-r--r--arch/m68k/Kconfig.machine16
-rw-r--r--arch/m68k/Makefile1
-rw-r--r--arch/m68k/include/asm/commproc.h17
-rw-r--r--arch/m68k/include/asm/dbg.h6
-rw-r--r--arch/m68k/include/asm/dma.h2
-rw-r--r--arch/m68k/include/asm/m53xxacr.h4
-rw-r--r--arch/m68k/include/asm/m53xxsim.h (renamed from arch/m68k/include/asm/m532xsim.h)12
-rw-r--r--arch/m68k/include/asm/m54xxacr.h7
-rw-r--r--arch/m68k/include/asm/mcfgpio.h10
-rw-r--r--arch/m68k/include/asm/mcfsim.h4
-rw-r--r--arch/m68k/include/asm/mcftimer.h2
-rw-r--r--arch/m68k/platform/coldfire/Makefile2
-rw-r--r--arch/m68k/platform/coldfire/m53xx.c (renamed from arch/m68k/platform/coldfire/m532x.c)23
-rw-r--r--arch/m68k/platform/coldfire/timers.c2
-rw-r--r--arch/microblaze/configs/mmu_defconfig1
-rw-r--r--arch/microblaze/include/asm/pci.h2
-rw-r--r--arch/microblaze/include/asm/uaccess.h30
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c5
-rw-r--r--arch/microblaze/kernel/head.S20
-rw-r--r--arch/microblaze/kernel/intc.c2
-rw-r--r--arch/microblaze/kernel/process.c5
-rw-r--r--arch/microblaze/mm/init.c9
-rw-r--r--arch/microblaze/pci/pci-common.c1
-rw-r--r--arch/mips/Kbuild4
-rw-r--r--arch/mips/Kconfig52
-rw-r--r--arch/mips/Makefile1
-rw-r--r--arch/mips/alchemy/Kconfig3
-rw-r--r--arch/mips/alchemy/Platform22
-rw-r--r--arch/mips/ar7/memory.c1
-rw-r--r--arch/mips/ath79/setup.c16
-rw-r--r--arch/mips/bcm63xx/Kconfig4
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c6
-rw-r--r--arch/mips/bcm63xx/clk.c43
-rw-r--r--arch/mips/bcm63xx/cpu.c142
-rw-r--r--arch/mips/bcm63xx/dev-flash.c6
-rw-r--r--arch/mips/bcm63xx/dev-spi.c26
-rw-r--r--arch/mips/bcm63xx/irq.c22
-rw-r--r--arch/mips/bcm63xx/prom.c2
-rw-r--r--arch/mips/bcm63xx/reset.c28
-rw-r--r--arch/mips/bcm63xx/setup.c5
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c5
-rw-r--r--arch/mips/configs/malta_defconfig69
-rw-r--r--arch/mips/configs/malta_kvm_defconfig456
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig453
-rw-r--r--arch/mips/configs/maltaaprp_defconfig195
-rw-r--r--arch/mips/configs/maltasmtc_defconfig196
-rw-r--r--arch/mips/configs/maltasmvp_defconfig199
-rw-r--r--arch/mips/configs/maltaup_defconfig194
-rw-r--r--arch/mips/configs/sead3_defconfig3
-rw-r--r--arch/mips/configs/sead3micro_defconfig122
-rw-r--r--arch/mips/fw/lib/Makefile2
-rw-r--r--arch/mips/fw/lib/cmdline.c101
-rw-r--r--arch/mips/include/asm/asm.h2
-rw-r--r--arch/mips/include/asm/bootinfo.h1
-rw-r--r--arch/mips/include/asm/branch.h40
-rw-r--r--arch/mips/include/asm/cpu-features.h3
-rw-r--r--arch/mips/include/asm/dma-coherence.h15
-rw-r--r--arch/mips/include/asm/dma-mapping.h1
-rw-r--r--arch/mips/include/asm/fpu_emulator.h6
-rw-r--r--arch/mips/include/asm/fw/fw.h47
-rw-r--r--arch/mips/include/asm/gic.h16
-rw-r--r--arch/mips/include/asm/hazards.h371
-rw-r--r--arch/mips/include/asm/inst.h12
-rw-r--r--arch/mips/include/asm/irqflags.h153
-rw-r--r--arch/mips/include/asm/kvm.h55
-rw-r--r--arch/mips/include/asm/kvm_host.h667
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h11
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h141
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h11
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h105
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/ioremap.h1
-rw-r--r--arch/mips/include/asm/mach-generic/dma-coherence.h5
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h9
-rw-r--r--arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7620.h84
-rw-r--r--arch/mips/include/asm/mach-ralink/rt288x.h53
-rw-r--r--arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h56
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h27
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h56
-rw-r--r--arch/mips/include/asm/mach-ralink/rt3883.h252
-rw-r--r--arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h55
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h3
-rw-r--r--arch/mips/include/asm/mips-boards/prom.h47
-rw-r--r--arch/mips/include/asm/mips_machine.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h19
-rw-r--r--arch/mips/include/asm/mmu_context.h116
-rw-r--r--arch/mips/include/asm/netlogic/haldefs.h92
-rw-r--r--arch/mips/include/asm/netlogic/mips-extns.h20
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pic.h53
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/usb.h64
-rw-r--r--arch/mips/include/asm/pgtable.h1
-rw-r--r--arch/mips/include/asm/processor.h5
-rw-r--r--arch/mips/include/asm/prom.h3
-rw-r--r--arch/mips/include/asm/sn/sn_private.h2
-rw-r--r--arch/mips/include/asm/sn/types.h1
-rw-r--r--arch/mips/include/asm/spinlock.h120
-rw-r--r--arch/mips/include/asm/stackframe.h12
-rw-r--r--arch/mips/include/asm/thread_info.h8
-rw-r--r--arch/mips/include/asm/time.h8
-rw-r--r--arch/mips/include/asm/uaccess.h25
-rw-r--r--arch/mips/include/asm/uasm.h84
-rw-r--r--arch/mips/include/uapi/asm/inst.h564
-rw-r--r--arch/mips/kernel/Makefile7
-rw-r--r--arch/mips/kernel/asm-offsets.c66
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c4
-rw-r--r--arch/mips/kernel/branch.c178
-rw-r--r--arch/mips/kernel/cevt-gic.c104
-rw-r--r--arch/mips/kernel/cevt-r4k.c13
-rw-r--r--arch/mips/kernel/cpu-probe.c3
-rw-r--r--arch/mips/kernel/csrc-gic.c13
-rw-r--r--arch/mips/kernel/genex.S75
-rw-r--r--arch/mips/kernel/irq-gic.c47
-rw-r--r--arch/mips/kernel/linux32.c7
-rw-r--r--arch/mips/kernel/mips_machine.c22
-rw-r--r--arch/mips/kernel/proc.c6
-rw-r--r--arch/mips/kernel/process.c101
-rw-r--r--arch/mips/kernel/prom.c33
-rw-r--r--arch/mips/kernel/scall32-o32.S9
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c22
-rw-r--r--arch/mips/kernel/signal.c9
-rw-r--r--arch/mips/kernel/smp-mt.c3
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/smtc-asm.S3
-rw-r--r--arch/mips/kernel/smtc.c10
-rw-r--r--arch/mips/kernel/traps.c318
-rw-r--r--arch/mips/kernel/unaligned.c1489
-rw-r--r--arch/mips/kvm/00README.txt31
-rw-r--r--arch/mips/kvm/Kconfig49
-rw-r--r--arch/mips/kvm/Makefile13
-rw-r--r--arch/mips/kvm/kvm_cb.c14
-rw-r--r--arch/mips/kvm/kvm_locore.S650
-rw-r--r--arch/mips/kvm/kvm_mips.c958
-rw-r--r--arch/mips/kvm/kvm_mips_comm.h23
-rw-r--r--arch/mips/kvm/kvm_mips_commpage.c37
-rw-r--r--arch/mips/kvm/kvm_mips_dyntrans.c149
-rw-r--r--arch/mips/kvm/kvm_mips_emul.c1826
-rw-r--r--arch/mips/kvm/kvm_mips_int.c243
-rw-r--r--arch/mips/kvm/kvm_mips_int.h49
-rw-r--r--arch/mips/kvm/kvm_mips_opcode.h24
-rw-r--r--arch/mips/kvm/kvm_mips_stats.c82
-rw-r--r--arch/mips/kvm/kvm_tlb.c928
-rw-r--r--arch/mips/kvm/kvm_trap_emul.c482
-rw-r--r--arch/mips/kvm/trace.h46
-rw-r--r--arch/mips/lib/bitops.c14
-rw-r--r--arch/mips/lib/dump_tlb.c5
-rw-r--r--arch/mips/lib/memset.S84
-rw-r--r--arch/mips/lib/mips-atomic.c149
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c7
-rw-r--r--arch/mips/lib/strlen_user.S9
-rw-r--r--arch/mips/lib/strncpy_user.S32
-rw-r--r--arch/mips/lib/strnlen_user.S2
-rw-r--r--arch/mips/math-emu/cp1emu.c919
-rw-r--r--arch/mips/math-emu/dsemul.c30
-rw-r--r--arch/mips/mm/Makefile4
-rw-r--r--arch/mips/mm/c-r4k.c30
-rw-r--r--arch/mips/mm/cache.c1
-rw-r--r--arch/mips/mm/dma-default.c25
-rw-r--r--arch/mips/mm/page.c10
-rw-r--r--arch/mips/mm/tlb-r3k.c20
-rw-r--r--arch/mips/mm/tlb-r4k.c4
-rw-r--r--arch/mips/mm/tlb-r8k.c2
-rw-r--r--arch/mips/mm/tlbex.c132
-rw-r--r--arch/mips/mm/uasm-micromips.c221
-rw-r--r--arch/mips/mm/uasm-mips.c205
-rw-r--r--arch/mips/mm/uasm.c326
-rw-r--r--arch/mips/mti-malta/Makefile5
-rw-r--r--arch/mips/mti-malta/Platform6
-rw-r--r--arch/mips/mti-malta/malta-cmdline.c59
-rw-r--r--arch/mips/mti-malta/malta-display.c38
-rw-r--r--arch/mips/mti-malta/malta-init.c153
-rw-r--r--arch/mips/mti-malta/malta-int.c4
-rw-r--r--arch/mips/mti-malta/malta-memory.c104
-rw-r--r--arch/mips/mti-malta/malta-setup.c87
-rw-r--r--arch/mips/mti-malta/malta-time.c55
-rw-r--r--arch/mips/mti-sead3/Makefile8
-rw-r--r--arch/mips/mti-sead3/leds-sead3.c24
-rw-r--r--arch/mips/mti-sead3/sead3-cmdline.c46
-rw-r--r--arch/mips/mti-sead3/sead3-console.c2
-rw-r--r--arch/mips/mti-sead3/sead3-display.c1
-rw-r--r--arch/mips/mti-sead3/sead3-init.c130
-rw-r--r--arch/mips/mti-sead3/sead3-int.c1
-rw-r--r--arch/mips/mti-sead3/sead3-setup.c4
-rw-r--r--arch/mips/mti-sead3/sead3-time.c1
-rw-r--r--arch/mips/netlogic/Kconfig17
-rw-r--r--arch/mips/netlogic/common/smp.c21
-rw-r--r--arch/mips/netlogic/dts/Makefile1
-rw-r--r--arch/mips/netlogic/dts/xlp_evp.dts2
-rw-r--r--arch/mips/netlogic/dts/xlp_svp.dts124
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c62
-rw-r--r--arch/mips/netlogic/xlp/setup.c22
-rw-r--r--arch/mips/netlogic/xlp/usb-init.c49
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c2
-rw-r--r--arch/mips/pci/pci-ar71xx.c6
-rw-r--r--arch/mips/pci/pci-ar724x.c18
-rw-r--r--arch/mips/pci/pci-bcm63xx.c11
-rw-r--r--arch/mips/powertv/init.c3
-rw-r--r--arch/mips/powertv/init.h2
-rw-r--r--arch/mips/powertv/memory.c1
-rw-r--r--arch/mips/powertv/powertv_setup.c1
-rw-r--r--arch/mips/ralink/Kconfig23
-rw-r--r--arch/mips/ralink/Makefile3
-rw-r--r--arch/mips/ralink/Platform18
-rw-r--r--arch/mips/ralink/common.h11
-rw-r--r--arch/mips/ralink/dts/Makefile3
-rw-r--r--arch/mips/ralink/dts/mt7620a.dtsi58
-rw-r--r--arch/mips/ralink/dts/mt7620a_eval.dts16
-rw-r--r--arch/mips/ralink/dts/rt2880.dtsi58
-rw-r--r--arch/mips/ralink/dts/rt2880_eval.dts46
-rw-r--r--arch/mips/ralink/dts/rt3050.dtsi52
-rw-r--r--arch/mips/ralink/dts/rt3052_eval.dts12
-rw-r--r--arch/mips/ralink/dts/rt3883.dtsi58
-rw-r--r--arch/mips/ralink/dts/rt3883_eval.dts16
-rw-r--r--arch/mips/ralink/early_printk.c4
-rw-r--r--arch/mips/ralink/irq.c5
-rw-r--r--arch/mips/ralink/mt7620.c234
-rw-r--r--arch/mips/ralink/of.c9
-rw-r--r--arch/mips/ralink/rt288x.c143
-rw-r--r--arch/mips/ralink/rt305x.c70
-rw-r--r--arch/mips/ralink/rt3883.c246
-rw-r--r--arch/mips/sgi-ip27/ip27-klnuma.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c16
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/parisc/kernel/sys_parisc32.c8
-rw-r--r--arch/powerpc/kernel/entry_64.S2
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c8
-rw-r--r--arch/s390/kernel/compat_wrapper.S13
-rw-r--r--arch/s390/kernel/syscalls.S4
-rw-r--r--arch/sparc/kernel/sys32.S9
-rw-r--r--arch/sparc/kernel/systbls_64.S2
-rw-r--r--arch/unicore32/kernel/sys.c10
-rw-r--r--arch/x86/ia32/sys_ia32.c9
-rw-r--r--arch/x86/include/asm/sys_ia32.h3
-rw-r--r--arch/x86/include/asm/syscalls.h4
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h3
-rw-r--r--arch/x86/kernel/vm86_32.c38
-rw-r--r--arch/x86/kvm/emulate.c42
-rw-r--r--arch/x86/kvm/vmx.c6
-rw-r--r--arch/x86/kvm/x86.c40
-rw-r--r--arch/x86/pci/xen.c5
-rw-r--r--arch/x86/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/xen/enlighten.c50
-rw-r--r--arch/x86/xen/spinlock.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c5
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c27
-rw-r--r--drivers/gpu/drm/drm_drv.c20
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c6
-rw-r--r--drivers/gpu/drm/drm_mm.c34
-rw-r--r--drivers/gpu/drm/drm_modes.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c77
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c16
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c44
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c90
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-debug.c15
-rw-r--r--drivers/hid/hid-steelseries.c9
-rw-r--r--drivers/idle/intel_idle.c21
-rw-r--r--drivers/lguest/page_tables.c1
-rw-r--r--drivers/md/dm-bufio.c24
-rw-r--r--drivers/md/dm-cache-metadata.c4
-rw-r--r--drivers/md/dm-cache-policy.h4
-rw-r--r--drivers/md/dm-cache-target.c100
-rw-r--r--drivers/md/dm-mpath.c1
-rw-r--r--drivers/md/dm-snap.c1
-rw-r--r--drivers/md/dm-stripe.c11
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-thin-metadata.c36
-rw-r--r--drivers/md/dm-thin-metadata.h7
-rw-r--r--drivers/md/dm-thin.c200
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c3
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c127
-rw-r--r--drivers/md/persistent-data/dm-space-map.h23
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c140
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/dell-laptop.c10
-rw-r--r--drivers/platform/x86/dell-wmi-aio.c53
-rw-r--r--drivers/platform/x86/hp-wmi.c24
-rw-r--r--drivers/platform/x86/hp_accel.c3
-rw-r--r--drivers/platform/x86/ideapad-laptop.c6
-rw-r--r--drivers/platform/x86/pvpanic.c124
-rw-r--r--drivers/platform/x86/samsung-q10.c5
-rw-r--r--drivers/platform/x86/sony-laptop.c20
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c24
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c2
-rw-r--r--drivers/scsi/be2iscsi/be.h2
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c172
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h27
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c70
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c375
-rw-r--r--drivers/scsi/be2iscsi/be_main.h29
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c43
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h35
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c55
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h2
-rw-r--r--drivers/scsi/fnic/fnic.h34
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c564
-rw-r--r--drivers/scsi/fnic/fnic_fip.h68
-rw-r--r--drivers/scsi/fnic/fnic_main.c51
-rw-r--r--drivers/scsi/fnic/vnic_dev.c10
-rw-r--r--drivers/scsi/fnic/vnic_dev.h2
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h67
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c85
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h7
-rw-r--r--drivers/scsi/ipr.c16
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/isci/remote_device.c4
-rw-r--r--drivers/scsi/isci/remote_device.h2
-rw-r--r--drivers/scsi/isci/request.c6
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c18
-rw-r--r--drivers/scsi/libsas/sas_discover.c34
-rw-r--r--drivers/scsi/libsas/sas_expander.c110
-rw-r--r--drivers/scsi/libsas/sas_internal.h10
-rw-r--r--drivers/scsi/libsas/sas_port.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h24
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c166
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c39
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c113
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c91
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c607
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c798
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c106
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c3
-rw-r--r--drivers/scsi/mvsas/mv_init.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.c16
-rw-r--r--drivers/scsi/mvsas/mv_sas.h4
-rw-r--r--drivers/scsi/pm8001/Makefile7
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c74
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h34
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c817
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c383
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c119
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h181
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c4130
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h1523
-rw-r--r--drivers/scsi/qla2xxx/Kconfig4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c45
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c97
-rw-r--r--drivers/scsi/scsi_error.c37
-rw-r--r--drivers/scsi/scsi_lib.c9
-rw-r--r--drivers/scsi/scsi_pm.c84
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c94
-rw-r--r--drivers/scsi/sd.c42
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sd_dif.c8
-rw-r--r--drivers/scsi/ufs/Kconfig11
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c217
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/spi/spi-atmel.c51
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/tty/serial/68328serial.c1
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c1
-rw-r--r--drivers/tty/tty_audit.c104
-rw-r--r--drivers/vhost/vringh.c3
-rw-r--r--drivers/video/au1100fb.c22
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/events.c3
-rw-r--r--fs/ecryptfs/crypto.c141
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h3
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfsd/nfs4proc.c15
-rw-r--r--fs/nfsd/nfs4recover.c12
-rw-r--r--fs/notify/fanotify/fanotify_user.c17
-rw-r--r--fs/romfs/mmap-nommu.c5
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_fb_helper.h15
-rw-r--r--include/linux/audit.h48
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/cpuidle.h2
-rw-r--r--include/linux/device-mapper.h15
-rw-r--r--include/linux/ftrace.h4
-rw-r--r--include/linux/ftrace_event.h1
-rw-r--r--include/linux/hid.h2
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/spi/spi.h4
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/scsi/libsas.h4
-rw-r--r--include/scsi/osd_protocol.h2
-rw-r--r--include/scsi/sas.h22
-rw-r--r--include/scsi/sas_ata.h4
-rw-r--r--include/scsi/scsi_device.h16
-rw-r--r--include/scsi/scsi_transport_iscsi.h8
-rw-r--r--include/scsi/scsi_transport_sas.h7
-rw-r--r--include/sound/tlv.h6
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--kernel/audit.c516
-rw-r--r--kernel/audit.h156
-rw-r--r--kernel/auditfilter.c360
-rw-r--r--kernel/auditsc.c407
-rw-r--r--kernel/params.c5
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/sysctl_binary.c4
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/ftrace.c126
-rw-r--r--kernel/trace/trace_events.c54
-rw-r--r--kernel/trace/trace_kprobe.c289
-rw-r--r--net/socket.c6
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c58
-rw-r--r--sound/atmel/abdac.c2
-rw-r--r--sound/atmel/ac97c.c2
-rw-r--r--sound/mips/hal2.c1
-rw-r--r--sound/mips/sgio2audio.c1
-rw-r--r--sound/oss/Kconfig1
-rw-r--r--sound/pci/hda/hda_codec.c7
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_conexant.c17
-rw-r--r--sound/pci/hda/patch_hdmi.c54
-rw-r--r--sound/soc/codecs/wm8994.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c7
-rw-r--r--sound/soc/soc-dapm.c4
-rw-r--r--tools/power/x86/turbostat/turbostat.c54
-rw-r--r--virt/kvm/kvm_main.c20
483 files changed, 29866 insertions, 5702 deletions
diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
new file mode 100644
index 000000000000..b35a8d04f8b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/ralink.txt
@@ -0,0 +1,17 @@
1Ralink MIPS SoC device tree bindings
2
31. SoCs
4
5Each device tree must specify a compatible value for the Ralink SoC
6it uses in the compatible property of the root node. The compatible
7value must be one of the following values:
8
9 ralink,rt2880-soc
10 ralink,rt3050-soc
11 ralink,rt3052-soc
12 ralink,rt3350-soc
13 ralink,rt3352-soc
14 ralink,rt3883-soc
15 ralink,rt5350-soc
16 ralink,mt7620a-soc
17 ralink,mt7620n-soc
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d1919bf2332..6931c4348d24 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -42,6 +42,7 @@ onnn ON Semiconductor Corp.
42picochip Picochip Ltd 42picochip Picochip Ltd
43powervr PowerVR (deprecated, use img) 43powervr PowerVR (deprecated, use img)
44qcom Qualcomm, Inc. 44qcom Qualcomm, Inc.
45ralink Mediatek/Ralink Technology Corp.
45ramtron Ramtron International 46ramtron Ramtron International
46realtek Realtek Semiconductor Corp. 47realtek Realtek Semiconductor Corp.
47renesas Renesas Electronics Corporation 48renesas Renesas Electronics Corporation
diff --git a/Makefile b/Makefile
index a3a834b11a97..cd11e8857604 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 9 2PATCHLEVEL = 10
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc1
5NAME = Unicycling Gorilla 5NAME = Unicycling Gorilla
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 491ae7923b10..5917099470ea 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES
182 Note that Global I/D ENABLE + Per Page DISABLE works but corollary 182 Note that Global I/D ENABLE + Per Page DISABLE works but corollary
183 Global DISABLE + Per Page ENABLE won't work 183 Global DISABLE + Per Page ENABLE won't work
184 184
185config ARC_CACHE_VIPT_ALIASING
186 bool "Support VIPT Aliasing D$"
187 default n
188
185endif #ARC_CACHE 189endif #ARC_CACHE
186 190
187config ARC_HAS_ICCM 191config ARC_HAS_ICCM
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 48af742f8b5a..d8dd660898b9 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -32,7 +32,6 @@ generic-y += resource.h
32generic-y += scatterlist.h 32generic-y += scatterlist.h
33generic-y += sembuf.h 33generic-y += sembuf.h
34generic-y += shmbuf.h 34generic-y += shmbuf.h
35generic-y += shmparam.h
36generic-y += siginfo.h 35generic-y += siginfo.h
37generic-y += socket.h 36generic-y += socket.h
38generic-y += sockios.h 37generic-y += sockios.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 6632273861fd..d5555fe4742a 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -55,9 +55,6 @@
55 : "r"(data), "r"(ptr)); \ 55 : "r"(data), "r"(ptr)); \
56}) 56})
57 57
58/* used to give SHMLBA a value to avoid Cache Aliasing */
59extern unsigned int ARC_shmlba;
60
61#define ARCH_DMA_MINALIGN L1_CACHE_BYTES 58#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
62 59
63/* 60/*
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index ee1f6eae82d2..9f841af41092 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -19,6 +19,7 @@
19#define _ASM_CACHEFLUSH_H 19#define _ASM_CACHEFLUSH_H
20 20
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <asm/shmparam.h>
22 23
23/* 24/*
24 * Semantically we need this because icache doesn't snoop dcache/dma. 25 * Semantically we need this because icache doesn't snoop dcache/dma.
@@ -33,7 +34,9 @@ void flush_cache_all(void);
33void flush_icache_range(unsigned long start, unsigned long end); 34void flush_icache_range(unsigned long start, unsigned long end);
34void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len); 35void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
35void __inv_icache_page(unsigned long paddr, unsigned long vaddr); 36void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
36void __flush_dcache_page(unsigned long paddr); 37void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
38#define __flush_dcache_page(p, v) \
39 ___flush_dcache_page((unsigned long)p, (unsigned long)v)
37 40
38#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 41#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
39 42
@@ -50,18 +53,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
50#define flush_cache_vmap(start, end) flush_cache_all() 53#define flush_cache_vmap(start, end) flush_cache_all()
51#define flush_cache_vunmap(start, end) flush_cache_all() 54#define flush_cache_vunmap(start, end) flush_cache_all()
52 55
53/* 56#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
54 * VM callbacks when entire/range of user-space V-P mappings are 57
55 * torn-down/get-invalidated 58#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
56 * 59
57 * Currently we don't support D$ aliasing configs for our VIPT caches
58 * NOPS for VIPT Cache with non-aliasing D$ configurations only
59 */
60#define flush_cache_dup_mm(mm) /* called on fork */
61#define flush_cache_mm(mm) /* called on munmap/exit */ 60#define flush_cache_mm(mm) /* called on munmap/exit */
62#define flush_cache_range(mm, u_vstart, u_vend) 61#define flush_cache_range(mm, u_vstart, u_vend)
63#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ 62#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
64 63
64#else /* VIPT aliasing dcache */
65
66/* To clear out stale userspace mappings */
67void flush_cache_mm(struct mm_struct *mm);
68void flush_cache_range(struct vm_area_struct *vma,
69 unsigned long start,unsigned long end);
70void flush_cache_page(struct vm_area_struct *vma,
71 unsigned long user_addr, unsigned long page);
72
73/*
74 * To make sure that userspace mapping is flushed to memory before
75 * get_user_pages() uses a kernel mapping to access the page
76 */
77#define ARCH_HAS_FLUSH_ANON_PAGE
78void flush_anon_page(struct vm_area_struct *vma,
79 struct page *page, unsigned long u_vaddr);
80
81#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
82
83/*
84 * Simple wrapper over config option
85 * Bootup code ensures that hardware matches kernel configuration
86 */
87static inline int cache_is_vipt_aliasing(void)
88{
89#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
90 return 1;
91#else
92 return 0;
93#endif
94}
95
96#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
97
98/*
99 * checks if two addresses (after page aligning) index into same cache set
100 */
101#define addr_not_cache_congruent(addr1, addr2) \
102 cache_is_vipt_aliasing() ? \
103 (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \
104
65#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 105#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
66do { \ 106do { \
67 memcpy(dst, src, len); \ 107 memcpy(dst, src, len); \
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index bdf546104551..374a35514116 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -16,13 +16,27 @@
16#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) 16#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
17#define free_user_page(page, addr) free_page(addr) 17#define free_user_page(page, addr) free_page(addr)
18 18
19/* TBD: for now don't worry about VIPT D$ aliasing */
20#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) 19#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
21#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 20#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
22 21
22#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
23
23#define clear_user_page(addr, vaddr, pg) clear_page(addr) 24#define clear_user_page(addr, vaddr, pg) clear_page(addr)
24#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom) 25#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
25 26
27#else /* VIPT aliasing dcache */
28
29struct vm_area_struct;
30struct page;
31
32#define __HAVE_ARCH_COPY_USER_HIGHPAGE
33
34void copy_user_highpage(struct page *to, struct page *from,
35 unsigned long u_vaddr, struct vm_area_struct *vma);
36void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
37
38#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
39
26#undef STRICT_MM_TYPECHECKS 40#undef STRICT_MM_TYPECHECKS
27 41
28#ifdef STRICT_MM_TYPECHECKS 42#ifdef STRICT_MM_TYPECHECKS
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index b7e36684c091..1cc4720faccb 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -395,6 +395,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
395 395
396#include <asm-generic/pgtable.h> 396#include <asm-generic/pgtable.h>
397 397
398/* to cope with aliasing VIPT cache */
399#define HAVE_ARCH_UNMAPPED_AREA
400
398/* 401/*
399 * No page table caches to initialise 402 * No page table caches to initialise
400 */ 403 */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 000000000000..fffeecc04270
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ARC_ASM_SHMPARAM_H
10#define __ARC_ASM_SHMPARAM_H
11
12/* Handle upto 2 cache bins */
13#define SHMLBA (2 * PAGE_SIZE)
14
15/* Enforce SHMLBA in shmat */
16#define __ARCH_FORCE_SHMLBA
17
18#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index fe91719866a5..85b6df839bd7 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -30,13 +30,20 @@ do { \
30/* 30/*
31 * This pair is called at time of munmap/exit to flush cache and TLB entries 31 * This pair is called at time of munmap/exit to flush cache and TLB entries
32 * for mappings being torn down. 32 * for mappings being torn down.
33 * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now) 33 * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
34 * as we don't support aliasing configs in our VIPT D$.
35 * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range 34 * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
36 * 35 *
37 * Note, read http://lkml.org/lkml/2004/1/15/6 36 * Note, read http://lkml.org/lkml/2004/1/15/6
38 */ 37 */
38#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
39#define tlb_start_vma(tlb, vma) 39#define tlb_start_vma(tlb, vma)
40#else
41#define tlb_start_vma(tlb, vma) \
42do { \
43 if (!tlb->fullmm) \
44 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
45} while(0)
46#endif
40 47
41#define tlb_end_vma(tlb, vma) \ 48#define tlb_end_vma(tlb, vma) \
42do { \ 49do { \
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 168dc146a8f6..ac95cc239c1e 100644
--- a/arch/arc/mm/Makefile
+++ b/arch/arc/mm/Makefile
@@ -7,4 +7,4 @@
7# 7#
8 8
9obj-y := extable.o ioremap.o dma.o fault.o init.o 9obj-y := extable.o ioremap.o dma.o fault.o init.o
10obj-y += tlb.o tlbex.o cache_arc700.o 10obj-y += tlb.o tlbex.o cache_arc700.o mmap.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index c854cf95f706..2f12bca8aef3 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -68,6 +68,7 @@
68#include <linux/mmu_context.h> 68#include <linux/mmu_context.h>
69#include <linux/syscalls.h> 69#include <linux/syscalls.h>
70#include <linux/uaccess.h> 70#include <linux/uaccess.h>
71#include <linux/pagemap.h>
71#include <asm/cacheflush.h> 72#include <asm/cacheflush.h>
72#include <asm/cachectl.h> 73#include <asm/cachectl.h>
73#include <asm/setup.h> 74#include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
138 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; 139 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
139 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; 140 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
140 int way_pg_ratio = way_pg_ratio; 141 int way_pg_ratio = way_pg_ratio;
142 int dcache_does_alias;
141 char str[256]; 143 char str[256];
142 144
143 printk(arc_cache_mumbojumbo(0, str, sizeof(str))); 145 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ chk_dc:
184 panic("Cache H/W doesn't match kernel Config"); 186 panic("Cache H/W doesn't match kernel Config");
185 } 187 }
186 188
189 dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
190
187 /* check for D-Cache aliasing */ 191 /* check for D-Cache aliasing */
188 if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE) 192 if (dcache_does_alias && !cache_is_vipt_aliasing())
189 panic("D$ aliasing not handled right now\n"); 193 panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
194 else if (!dcache_does_alias && cache_is_vipt_aliasing())
195 panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
190#endif 196#endif
191 197
192 /* Set the default Invalidate Mode to "simpy discard dirty lines" 198 /* Set the default Invalidate Mode to "simpy discard dirty lines"
@@ -269,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop)
269 * Per Line Operation on D-Cache 275 * Per Line Operation on D-Cache
270 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete 276 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
271 * It's sole purpose is to help gcc generate ZOL 277 * It's sole purpose is to help gcc generate ZOL
278 * (aliasing VIPT dcache flushing needs both vaddr and paddr)
272 */ 279 */
273static inline void __dc_line_loop(unsigned long start, unsigned long sz, 280static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
274 int aux_reg) 281 unsigned long sz, const int aux_reg)
275{ 282{
276 int num_lines, slack; 283 int num_lines;
277 284
278 /* Ensure we properly floor/ceil the non-line aligned/sized requests 285 /* Ensure we properly floor/ceil the non-line aligned/sized requests
279 * and have @start - aligned to cache line and integral @num_lines. 286 * and have @paddr - aligned to cache line and integral @num_lines.
280 * This however can be avoided for page sized since: 287 * This however can be avoided for page sized since:
281 * -@start will be cache-line aligned already (being page aligned) 288 * -@paddr will be cache-line aligned already (being page aligned)
282 * -@sz will be integral multiple of line size (being page sized). 289 * -@sz will be integral multiple of line size (being page sized).
283 */ 290 */
284 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { 291 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
285 slack = start & ~DCACHE_LINE_MASK; 292 sz += paddr & ~DCACHE_LINE_MASK;
286 sz += slack; 293 paddr &= DCACHE_LINE_MASK;
287 start -= slack; 294 vaddr &= DCACHE_LINE_MASK;
288 } 295 }
289 296
290 num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN); 297 num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
291 298
299#if (CONFIG_ARC_MMU_VER <= 2)
300 paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
301#endif
302
292 while (num_lines-- > 0) { 303 while (num_lines-- > 0) {
293#if (CONFIG_ARC_MMU_VER > 2) 304#if (CONFIG_ARC_MMU_VER > 2)
294 /* 305 /*
295 * Just as for I$, in MMU v3, D$ ops also require 306 * Just as for I$, in MMU v3, D$ ops also require
296 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops 307 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
297 * But we pass phy addr for both. This works since Linux
298 * doesn't support aliasing configs for D$, yet.
299 * Thus paddr is enough to provide both tag and index.
300 */ 308 */
301 write_aux_reg(ARC_REG_DC_PTAG, start); 309 write_aux_reg(ARC_REG_DC_PTAG, paddr);
310
311 write_aux_reg(aux_reg, vaddr);
312 vaddr += ARC_DCACHE_LINE_LEN;
313#else
314 /* paddr contains stuffed vaddrs bits */
315 write_aux_reg(aux_reg, paddr);
302#endif 316#endif
303 write_aux_reg(aux_reg, start); 317 paddr += ARC_DCACHE_LINE_LEN;
304 start += ARC_DCACHE_LINE_LEN;
305 } 318 }
306} 319}
307 320
321/* For kernel mappings cache operation: index is same as paddr */
322#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
323
308/* 324/*
309 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback) 325 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
310 */ 326 */
311static inline void __dc_line_op(unsigned long start, unsigned long sz, 327static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
312 const int cacheop) 328 unsigned long sz, const int cacheop)
313{ 329{
314 unsigned long flags, tmp = tmp; 330 unsigned long flags, tmp = tmp;
315 int aux; 331 int aux;
@@ -332,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
332 else 348 else
333 aux = ARC_REG_DC_FLDL; 349 aux = ARC_REG_DC_FLDL;
334 350
335 __dc_line_loop(start, sz, aux); 351 __dc_line_loop(paddr, vaddr, sz, aux);
336 352
337 if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ 353 if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
338 wait_for_flush(); 354 wait_for_flush();
@@ -347,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
347#else 363#else
348 364
349#define __dc_entire_op(cacheop) 365#define __dc_entire_op(cacheop)
350#define __dc_line_op(start, sz, cacheop) 366#define __dc_line_op(paddr, vaddr, sz, cacheop)
367#define __dc_line_op_k(paddr, sz, cacheop)
351 368
352#endif /* CONFIG_ARC_HAS_DCACHE */ 369#endif /* CONFIG_ARC_HAS_DCACHE */
353 370
@@ -399,49 +416,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
399/*********************************************************** 416/***********************************************************
400 * Machine specific helper for per line I-Cache invalidate. 417 * Machine specific helper for per line I-Cache invalidate.
401 */ 418 */
402static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr, 419static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
403 unsigned long sz) 420 unsigned long sz)
404{ 421{
405 unsigned long flags; 422 unsigned long flags;
406 int num_lines, slack; 423 int num_lines;
407 unsigned int addr;
408 424
409 /* 425 /*
410 * Ensure we properly floor/ceil the non-line aligned/sized requests: 426 * Ensure we properly floor/ceil the non-line aligned/sized requests:
411 * However page sized flushes can be compile time optimised. 427 * However page sized flushes can be compile time optimised.
412 * -@phy_start will be cache-line aligned already (being page aligned) 428 * -@paddr will be cache-line aligned already (being page aligned)
413 * -@sz will be integral multiple of line size (being page sized). 429 * -@sz will be integral multiple of line size (being page sized).
414 */ 430 */
415 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { 431 if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
416 slack = phy_start & ~ICACHE_LINE_MASK; 432 sz += paddr & ~ICACHE_LINE_MASK;
417 sz += slack; 433 paddr &= ICACHE_LINE_MASK;
418 phy_start -= slack; 434 vaddr &= ICACHE_LINE_MASK;
419 } 435 }
420 436
421 num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); 437 num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
422 438
423#if (CONFIG_ARC_MMU_VER > 2) 439#if (CONFIG_ARC_MMU_VER <= 2)
424 vaddr &= ~ICACHE_LINE_MASK;
425 addr = phy_start;
426#else
427 /* bits 17:13 of vaddr go as bits 4:0 of paddr */ 440 /* bits 17:13 of vaddr go as bits 4:0 of paddr */
428 addr = phy_start | ((vaddr >> 13) & 0x1F); 441 paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
429#endif 442#endif
430 443
431 local_irq_save(flags); 444 local_irq_save(flags);
432 while (num_lines-- > 0) { 445 while (num_lines-- > 0) {
433#if (CONFIG_ARC_MMU_VER > 2) 446#if (CONFIG_ARC_MMU_VER > 2)
434 /* tag comes from phy addr */ 447 /* tag comes from phy addr */
435 write_aux_reg(ARC_REG_IC_PTAG, addr); 448 write_aux_reg(ARC_REG_IC_PTAG, paddr);
436 449
437 /* index bits come from vaddr */ 450 /* index bits come from vaddr */
438 write_aux_reg(ARC_REG_IC_IVIL, vaddr); 451 write_aux_reg(ARC_REG_IC_IVIL, vaddr);
439 vaddr += ARC_ICACHE_LINE_LEN; 452 vaddr += ARC_ICACHE_LINE_LEN;
440#else 453#else
441 /* paddr contains stuffed vaddrs bits */ 454 /* paddr contains stuffed vaddrs bits */
442 write_aux_reg(ARC_REG_IC_IVIL, addr); 455 write_aux_reg(ARC_REG_IC_IVIL, paddr);
443#endif 456#endif
444 addr += ARC_ICACHE_LINE_LEN; 457 paddr += ARC_ICACHE_LINE_LEN;
445 } 458 }
446 local_irq_restore(flags); 459 local_irq_restore(flags);
447} 460}
@@ -457,29 +470,66 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
457 * Exported APIs 470 * Exported APIs
458 */ 471 */
459 472
473/*
474 * Handle cache congruency of kernel and userspace mappings of page when kernel
475 * writes-to/reads-from
476 *
477 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
478 * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
479 * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
480 * -In SMP, if hardware caches are coherent
481 *
482 * There's a corollary case, where kernel READs from a userspace mapped page.
483 * If the U-mapping is not congruent to to K-mapping, former needs flushing.
484 */
460void flush_dcache_page(struct page *page) 485void flush_dcache_page(struct page *page)
461{ 486{
462 /* Make a note that dcache is not yet flushed for this page */ 487 struct address_space *mapping;
463 set_bit(PG_arch_1, &page->flags); 488
489 if (!cache_is_vipt_aliasing()) {
490 set_bit(PG_arch_1, &page->flags);
491 return;
492 }
493
494 /* don't handle anon pages here */
495 mapping = page_mapping(page);
496 if (!mapping)
497 return;
498
499 /*
500 * pagecache page, file not yet mapped to userspace
501 * Make a note that K-mapping is dirty
502 */
503 if (!mapping_mapped(mapping)) {
504 set_bit(PG_arch_1, &page->flags);
505 } else if (page_mapped(page)) {
506
507 /* kernel reading from page with U-mapping */
508 void *paddr = page_address(page);
509 unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
510
511 if (addr_not_cache_congruent(paddr, vaddr))
512 __flush_dcache_page(paddr, vaddr);
513 }
464} 514}
465EXPORT_SYMBOL(flush_dcache_page); 515EXPORT_SYMBOL(flush_dcache_page);
466 516
467 517
468void dma_cache_wback_inv(unsigned long start, unsigned long sz) 518void dma_cache_wback_inv(unsigned long start, unsigned long sz)
469{ 519{
470 __dc_line_op(start, sz, OP_FLUSH_N_INV); 520 __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
471} 521}
472EXPORT_SYMBOL(dma_cache_wback_inv); 522EXPORT_SYMBOL(dma_cache_wback_inv);
473 523
474void dma_cache_inv(unsigned long start, unsigned long sz) 524void dma_cache_inv(unsigned long start, unsigned long sz)
475{ 525{
476 __dc_line_op(start, sz, OP_INV); 526 __dc_line_op_k(start, sz, OP_INV);
477} 527}
478EXPORT_SYMBOL(dma_cache_inv); 528EXPORT_SYMBOL(dma_cache_inv);
479 529
480void dma_cache_wback(unsigned long start, unsigned long sz) 530void dma_cache_wback(unsigned long start, unsigned long sz)
481{ 531{
482 __dc_line_op(start, sz, OP_FLUSH); 532 __dc_line_op_k(start, sz, OP_FLUSH);
483} 533}
484EXPORT_SYMBOL(dma_cache_wback); 534EXPORT_SYMBOL(dma_cache_wback);
485 535
@@ -560,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
560 610
561 local_irq_save(flags); 611 local_irq_save(flags);
562 __ic_line_inv_vaddr(paddr, vaddr, len); 612 __ic_line_inv_vaddr(paddr, vaddr, len);
563 __dc_line_op(paddr, len, OP_FLUSH); 613 __dc_line_op(paddr, vaddr, len, OP_FLUSH);
564 local_irq_restore(flags); 614 local_irq_restore(flags);
565} 615}
566 616
@@ -570,9 +620,13 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
570 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); 620 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
571} 621}
572 622
573void __flush_dcache_page(unsigned long paddr) 623/*
624 * wrapper to clearout kernel or userspace mappings of a page
625 * For kernel mappings @vaddr == @paddr
626 */
627void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
574{ 628{
575 __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV); 629 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
576} 630}
577 631
578void flush_icache_all(void) 632void flush_icache_all(void)
@@ -601,6 +655,87 @@ noinline void flush_cache_all(void)
601 655
602} 656}
603 657
658#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
659
660void flush_cache_mm(struct mm_struct *mm)
661{
662 flush_cache_all();
663}
664
665void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
666 unsigned long pfn)
667{
668 unsigned int paddr = pfn << PAGE_SHIFT;
669
670 __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
671}
672
673void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
674 unsigned long end)
675{
676 flush_cache_all();
677}
678
679void copy_user_highpage(struct page *to, struct page *from,
680 unsigned long u_vaddr, struct vm_area_struct *vma)
681{
682 void *kfrom = page_address(from);
683 void *kto = page_address(to);
684 int clean_src_k_mappings = 0;
685
686 /*
687 * If SRC page was already mapped in userspace AND it's U-mapping is
688 * not congruent with K-mapping, sync former to physical page so that
689 * K-mapping in memcpy below, sees the right data
690 *
691 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
692 * equally valid for SRC page as well
693 */
694 if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
695 __flush_dcache_page(kfrom, u_vaddr);
696 clean_src_k_mappings = 1;
697 }
698
699 copy_page(kto, kfrom);
700
701 /*
702 * Mark DST page K-mapping as dirty for a later finalization by
703 * update_mmu_cache(). Although the finalization could have been done
704 * here as well (given that both vaddr/paddr are available).
705 * But update_mmu_cache() already has code to do that for other
706 * non copied user pages (e.g. read faults which wire in pagecache page
707 * directly).
708 */
709 set_bit(PG_arch_1, &to->flags);
710
711 /*
712 * if SRC was already usermapped and non-congruent to kernel mapping
713 * sync the kernel mapping back to physical page
714 */
715 if (clean_src_k_mappings) {
716 __flush_dcache_page(kfrom, kfrom);
717 } else {
718 set_bit(PG_arch_1, &from->flags);
719 }
720}
721
722void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
723{
724 clear_page(to);
725 set_bit(PG_arch_1, &page->flags);
726}
727
728void flush_anon_page(struct vm_area_struct *vma, struct page *page,
729 unsigned long u_vaddr)
730{
731 /* TBD: do we really need to clear the kernel mapping */
732 __flush_dcache_page(page_address(page), u_vaddr);
733 __flush_dcache_page(page_address(page), page_address(page));
734
735}
736
737#endif
738
604/********************************************************************** 739/**********************************************************************
605 * Explicit Cache flush request from user space via syscall 740 * Explicit Cache flush request from user space via syscall
606 * Needed for JITs which generate code on the fly 741 * Needed for JITs which generate code on the fly
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 000000000000..2e06d56e987b
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,78 @@
1/*
2 * ARC700 mmap
3 *
4 * (started from arm version - for VIPT alias handling)
5 *
6 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/sched.h>
17#include <asm/cacheflush.h>
18
19#define COLOUR_ALIGN(addr, pgoff) \
20 ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
21 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
22
23/*
24 * Ensure that shared mappings are correctly aligned to
25 * avoid aliasing issues with VIPT caches.
26 * We need to ensure that
27 * a specific page of an object is always mapped at a multiple of
28 * SHMLBA bytes.
29 */
30unsigned long
31arch_get_unmapped_area(struct file *filp, unsigned long addr,
32 unsigned long len, unsigned long pgoff, unsigned long flags)
33{
34 struct mm_struct *mm = current->mm;
35 struct vm_area_struct *vma;
36 int do_align = 0;
37 int aliasing = cache_is_vipt_aliasing();
38 struct vm_unmapped_area_info info;
39
40 /*
41 * We only need to do colour alignment if D cache aliases.
42 */
43 if (aliasing)
44 do_align = filp || (flags & MAP_SHARED);
45
46 /*
47 * We enforce the MAP_FIXED case.
48 */
49 if (flags & MAP_FIXED) {
50 if (aliasing && flags & MAP_SHARED &&
51 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
52 return -EINVAL;
53 return addr;
54 }
55
56 if (len > TASK_SIZE)
57 return -ENOMEM;
58
59 if (addr) {
60 if (do_align)
61 addr = COLOUR_ALIGN(addr, pgoff);
62 else
63 addr = PAGE_ALIGN(addr);
64
65 vma = find_vma(mm, addr);
66 if (TASK_SIZE - len >= addr &&
67 (!vma || addr + len <= vma->vm_start))
68 return addr;
69 }
70
71 info.flags = 0;
72 info.length = len;
73 info.low_limit = mm->mmap_base;
74 info.high_limit = TASK_SIZE;
75 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
76 info.align_offset = pgoff << PAGE_SHIFT;
77 return vm_unmapped_area(&info);
78}
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 003d69ac6ffa..066145b5f348 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
421/* 421/*
422 * Called at the end of pagefault, for a userspace mapped page 422 * Called at the end of pagefault, for a userspace mapped page
423 * -pre-install the corresponding TLB entry into MMU 423 * -pre-install the corresponding TLB entry into MMU
424 * -Finalize the delayed D-cache flush (wback+inv kernel mapping) 424 * -Finalize the delayed D-cache flush of kernel mapping of page due to
425 * flush_dcache_page(), copy_user_page()
426 *
427 * Note that flush (when done) involves both WBACK - so physical page is
428 * in sync as well as INV - so any non-congruent aliases don't remain
425 */ 429 */
426void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, 430void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
427 pte_t *ptep) 431 pte_t *ptep)
428{ 432{
429 unsigned long vaddr = vaddr_unaligned & PAGE_MASK; 433 unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
434 unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
430 435
431 create_tlb(vma, vaddr, ptep); 436 create_tlb(vma, vaddr, ptep);
432 437
433 /* icache doesn't snoop dcache, thus needs to be made coherent here */ 438 /*
434 if (vma->vm_flags & VM_EXEC) { 439 * Exec page : Independent of aliasing/page-color considerations,
440 * since icache doesn't snoop dcache on ARC, any dirty
441 * K-mapping of a code page needs to be wback+inv so that
442 * icache fetch by userspace sees code correctly.
443 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
444 * so userspace sees the right data.
445 * (Avoids the flush for Non-exec + congruent mapping case)
446 */
447 if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
435 struct page *page = pfn_to_page(pte_pfn(*ptep)); 448 struct page *page = pfn_to_page(pte_pfn(*ptep));
436 449
437 /* if page was dcache dirty, flush now */
438 int dirty = test_and_clear_bit(PG_arch_1, &page->flags); 450 int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
439 if (dirty) { 451 if (dirty) {
440 unsigned long paddr = pte_val(*ptep) & PAGE_MASK; 452 /* wback + inv dcache lines */
441 __flush_dcache_page(paddr); 453 __flush_dcache_page(paddr, paddr);
442 __inv_icache_page(paddr, vaddr); 454
455 /* invalidate any existing icache lines */
456 if (vma->vm_flags & VM_EXEC)
457 __inv_icache_page(paddr, vaddr);
443 } 458 }
444 } 459 }
445} 460}
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
index 4e121272c4e5..1d3452100f1f 100644
--- a/arch/arc/plat-tb10x/Kconfig
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -27,10 +27,3 @@ menuconfig ARC_PLAT_TB10X
27 Abilis Systems. TB10x is based on the ARC700 CPU architecture. 27 Abilis Systems. TB10x is based on the ARC700 CPU architecture.
28 Say Y if you are building a kernel for one of the SOCs in this 28 Say Y if you are building a kernel for one of the SOCs in this
29 series (e.g. TB100 or TB101). If in doubt say N. 29 series (e.g. TB100 or TB101). If in doubt say N.
30
31if ARC_PLAT_TB10X
32
33config GENERIC_GPIO
34 def_bool y
35
36endif
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 12f22492df4c..58125bf008d3 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -389,7 +389,7 @@ __SYSCALL(364, sys_perf_event_open)
389__SYSCALL(365, compat_sys_recvmmsg) 389__SYSCALL(365, compat_sys_recvmmsg)
390__SYSCALL(366, sys_accept4) 390__SYSCALL(366, sys_accept4)
391__SYSCALL(367, sys_fanotify_init) 391__SYSCALL(367, sys_fanotify_init)
392__SYSCALL(368, compat_sys_fanotify_mark_wrapper) 392__SYSCALL(368, compat_sys_fanotify_mark)
393__SYSCALL(369, sys_prlimit64) 393__SYSCALL(369, sys_prlimit64)
394__SYSCALL(370, sys_name_to_handle_at) 394__SYSCALL(370, sys_name_to_handle_at)
395__SYSCALL(371, compat_sys_open_by_handle_at) 395__SYSCALL(371, compat_sys_open_by_handle_at)
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index db01aa978c41..a1b19ed7467c 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -104,13 +104,6 @@ compat_sys_fallocate_wrapper:
104 b sys_fallocate 104 b sys_fallocate
105ENDPROC(compat_sys_fallocate_wrapper) 105ENDPROC(compat_sys_fallocate_wrapper)
106 106
107compat_sys_fanotify_mark_wrapper:
108 orr x2, x2, x3, lsl #32
109 mov w3, w4
110 mov w4, w5
111 b sys_fanotify_mark
112ENDPROC(compat_sys_fanotify_mark_wrapper)
113
114#undef __SYSCALL 107#undef __SYSCALL
115#define __SYSCALL(x, y) .quad y // x 108#define __SYSCALL(x, y) .quad y // x
116 109
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 66cf00095b84..1fce08632ad7 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -141,11 +141,11 @@ archclean:
141 141
142INSTALL_PATH ?= /tftpboot 142INSTALL_PATH ?= /tftpboot
143boot := arch/$(ARCH)/boot 143boot := arch/$(ARCH)/boot
144BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip 144BOOT_TARGETS = uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
145PHONY += $(BOOT_TARGETS) install 145PHONY += $(BOOT_TARGETS) install
146KBUILD_IMAGE := $(boot)/vmImage 146KBUILD_IMAGE := $(boot)/uImage
147 147
148all: vmImage 148all: uImage
149 149
150$(BOOT_TARGETS): vmlinux 150$(BOOT_TARGETS): vmlinux
151 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 151 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index f7d27d50d02c..3efaa094fb90 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -6,7 +6,7 @@
6# for more details. 6# for more details.
7# 7#
8 8
9targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip 9targets := uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
10extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip 10extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip
11 11
12ifeq ($(CONFIG_RAMKERNEL),y) 12ifeq ($(CONFIG_RAMKERNEL),y)
@@ -39,22 +39,22 @@ quiet_cmd_mk_bin_xip = BIN $@
39$(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE 39$(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE
40 $(call if_changed,mk_bin_xip) 40 $(call if_changed,mk_bin_xip)
41 41
42$(obj)/vmImage.bin: $(obj)/vmlinux.bin 42$(obj)/uImage.bin: $(obj)/vmlinux.bin
43 $(call if_changed,uimage,none) 43 $(call if_changed,uimage,none)
44 44
45$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2 45$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
46 $(call if_changed,uimage,bzip2) 46 $(call if_changed,uimage,bzip2)
47 47
48$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz 48$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
49 $(call if_changed,uimage,gzip) 49 $(call if_changed,uimage,gzip)
50 50
51$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma 51$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
52 $(call if_changed,uimage,lzma) 52 $(call if_changed,uimage,lzma)
53 53
54$(obj)/vmImage.lzo: $(obj)/vmlinux.bin.lzo 54$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
55 $(call if_changed,uimage,lzo) 55 $(call if_changed,uimage,lzo)
56 56
57$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip 57$(obj)/uImage.xip: $(obj)/vmlinux.bin.xip
58 $(call if_changed,uimage,none) 58 $(call if_changed,uimage,none)
59 59
60suffix-y := bin 60suffix-y := bin
@@ -64,7 +64,7 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma
64suffix-$(CONFIG_KERNEL_LZO) := lzo 64suffix-$(CONFIG_KERNEL_LZO) := lzo
65suffix-$(CONFIG_ROMKERNEL) := xip 65suffix-$(CONFIG_ROMKERNEL) := xip
66 66
67$(obj)/vmImage: $(obj)/vmImage.$(suffix-y) 67$(obj)/uImage: $(obj)/uImage.$(suffix-y)
68 @ln -sf $(notdir $<) $@ 68 @ln -sf $(notdir $<) $@
69 69
70install: 70install:
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index c8db653c72d2..a107a98e9978 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -11,7 +11,9 @@
11 11
12#ifdef CONFIG_SMP 12#ifdef CONFIG_SMP
13 13
14#include <asm/barrier.h>
14#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/types.h>
15 17
16asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); 18asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
17asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); 19asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
diff --git a/arch/blackfin/include/asm/bfin_sdh.h b/arch/blackfin/include/asm/bfin_sdh.h
index 6a4cfe2d3367..a99957ea9e9b 100644
--- a/arch/blackfin/include/asm/bfin_sdh.h
+++ b/arch/blackfin/include/asm/bfin_sdh.h
@@ -24,18 +24,27 @@ struct bfin_sd_host {
24#define CMD_INT_E (1 << 8) /* Command Interrupt */ 24#define CMD_INT_E (1 << 8) /* Command Interrupt */
25#define CMD_PEND_E (1 << 9) /* Command Pending */ 25#define CMD_PEND_E (1 << 9) /* Command Pending */
26#define CMD_E (1 << 10) /* Command Enable */ 26#define CMD_E (1 << 10) /* Command Enable */
27#ifdef RSI_BLKSZ
28#define CMD_CRC_CHECK_D (1 << 11) /* CRC Check is disabled */
29#define CMD_DATA0_BUSY (1 << 12) /* Check for Busy State on the DATA0 pin */
30#endif
27 31
28/* SDH_PWR_CTL bitmasks */ 32/* SDH_PWR_CTL bitmasks */
33#ifndef RSI_BLKSZ
29#define PWR_ON 0x3 /* Power On */ 34#define PWR_ON 0x3 /* Power On */
30#define SD_CMD_OD (1 << 6) /* Open Drain Output */ 35#define SD_CMD_OD (1 << 6) /* Open Drain Output */
31#define ROD_CTL (1 << 7) /* Rod Control */ 36#define ROD_CTL (1 << 7) /* Rod Control */
37#endif
32 38
33/* SDH_CLK_CTL bitmasks */ 39/* SDH_CLK_CTL bitmasks */
34#define CLKDIV 0xff /* MC_CLK Divisor */ 40#define CLKDIV 0xff /* MC_CLK Divisor */
35#define CLK_E (1 << 8) /* MC_CLK Bus Clock Enable */ 41#define CLK_E (1 << 8) /* MC_CLK Bus Clock Enable */
36#define PWR_SV_E (1 << 9) /* Power Save Enable */ 42#define PWR_SV_E (1 << 9) /* Power Save Enable */
37#define CLKDIV_BYPASS (1 << 10) /* Bypass Divisor */ 43#define CLKDIV_BYPASS (1 << 10) /* Bypass Divisor */
38#define WIDE_BUS (1 << 11) /* Wide Bus Mode Enable */ 44#define BUS_MODE_MASK 0x1800 /* Bus Mode Mask */
45#define STD_BUS_1 0x000 /* Standard Bus 1 bit mode */
46#define WIDE_BUS_4 0x800 /* Wide Bus 4 bit mode */
47#define BYTE_BUS_8 0x1000 /* Byte Bus 8 bit mode */
39 48
40/* SDH_RESP_CMD bitmasks */ 49/* SDH_RESP_CMD bitmasks */
41#define RESP_CMD 0x3f /* Response Command */ 50#define RESP_CMD 0x3f /* Response Command */
@@ -45,7 +54,13 @@ struct bfin_sd_host {
45#define DTX_DIR (1 << 1) /* Data Transfer Direction */ 54#define DTX_DIR (1 << 1) /* Data Transfer Direction */
46#define DTX_MODE (1 << 2) /* Data Transfer Mode */ 55#define DTX_MODE (1 << 2) /* Data Transfer Mode */
47#define DTX_DMA_E (1 << 3) /* Data Transfer DMA Enable */ 56#define DTX_DMA_E (1 << 3) /* Data Transfer DMA Enable */
57#ifndef RSI_BLKSZ
48#define DTX_BLK_LGTH (0xf << 4) /* Data Transfer Block Length */ 58#define DTX_BLK_LGTH (0xf << 4) /* Data Transfer Block Length */
59#else
60
61/* Bit masks for SDH_BLK_SIZE */
62#define DTX_BLK_LGTH 0x1fff /* Data Transfer Block Length */
63#endif
49 64
50/* SDH_STATUS bitmasks */ 65/* SDH_STATUS bitmasks */
51#define CMD_CRC_FAIL (1 << 0) /* CMD CRC Fail */ 66#define CMD_CRC_FAIL (1 << 0) /* CMD CRC Fail */
@@ -114,10 +129,14 @@ struct bfin_sd_host {
114/* SDH_E_STATUS bitmasks */ 129/* SDH_E_STATUS bitmasks */
115#define SDIO_INT_DET (1 << 1) /* SDIO Int Detected */ 130#define SDIO_INT_DET (1 << 1) /* SDIO Int Detected */
116#define SD_CARD_DET (1 << 4) /* SD Card Detect */ 131#define SD_CARD_DET (1 << 4) /* SD Card Detect */
132#define SD_CARD_BUSYMODE (1 << 31) /* Card is in Busy mode */
133#define SD_CARD_SLPMODE (1 << 30) /* Card in Sleep Mode */
134#define SD_CARD_READY (1 << 17) /* Card Ready */
117 135
118/* SDH_E_MASK bitmasks */ 136/* SDH_E_MASK bitmasks */
119#define SDIO_MSK (1 << 1) /* Mask SDIO Int Detected */ 137#define SDIO_MSK (1 << 1) /* Mask SDIO Int Detected */
120#define SCD_MSK (1 << 6) /* Mask Card Detect */ 138#define SCD_MSK (1 << 4) /* Mask Card Detect */
139#define CARD_READY_MSK (1 << 16) /* Mask Card Ready */
121 140
122/* SDH_CFG bitmasks */ 141/* SDH_CFG bitmasks */
123#define CLKS_EN (1 << 0) /* Clocks Enable */ 142#define CLKS_EN (1 << 0) /* Clocks Enable */
@@ -126,7 +145,15 @@ struct bfin_sd_host {
126#define SD_RST (1 << 4) /* SDMMC Reset */ 145#define SD_RST (1 << 4) /* SDMMC Reset */
127#define PUP_SDDAT (1 << 5) /* Pull-up SD_DAT */ 146#define PUP_SDDAT (1 << 5) /* Pull-up SD_DAT */
128#define PUP_SDDAT3 (1 << 6) /* Pull-up SD_DAT3 */ 147#define PUP_SDDAT3 (1 << 6) /* Pull-up SD_DAT3 */
148#ifndef RSI_BLKSZ
129#define PD_SDDAT3 (1 << 7) /* Pull-down SD_DAT3 */ 149#define PD_SDDAT3 (1 << 7) /* Pull-down SD_DAT3 */
150#else
151#define PWR_ON 0x600 /* Power On */
152#define SD_CMD_OD (1 << 11) /* Open Drain Output */
153#define BOOT_EN (1 << 12) /* Boot Enable */
154#define BOOT_MODE (1 << 13) /* Alternate Boot Mode */
155#define BOOT_ACK_EN (1 << 14) /* Boot ACK is expected */
156#endif
130 157
131/* SDH_RD_WAIT_EN bitmasks */ 158/* SDH_RD_WAIT_EN bitmasks */
132#define RWR (1 << 0) /* Read Wait Request */ 159#define RWR (1 << 0) /* Read Wait Request */
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 8a0fed16058f..0ca40dd44724 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -41,6 +41,7 @@
41#include <asm-generic/bitops/non-atomic.h> 41#include <asm-generic/bitops/non-atomic.h>
42#else 42#else
43 43
44#include <asm/barrier.h>
44#include <asm/byteorder.h> /* swab32 */ 45#include <asm/byteorder.h> /* swab32 */
45#include <linux/linkage.h> 46#include <linux/linkage.h>
46 47
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index fe0ca03a1cb2..ca67145c6a45 100644
--- a/arch/blackfin/include/asm/def_LPBlackfin.h
+++ b/arch/blackfin/include/asm/def_LPBlackfin.h
@@ -622,10 +622,12 @@ do { \
622#define PAGE_SIZE_4KB 0x00010000 /* 4 KB page size */ 622#define PAGE_SIZE_4KB 0x00010000 /* 4 KB page size */
623#define PAGE_SIZE_1MB 0x00020000 /* 1 MB page size */ 623#define PAGE_SIZE_1MB 0x00020000 /* 1 MB page size */
624#define PAGE_SIZE_4MB 0x00030000 /* 4 MB page size */ 624#define PAGE_SIZE_4MB 0x00030000 /* 4 MB page size */
625#ifdef CONFIG_BF60x
625#define PAGE_SIZE_16KB 0x00040000 /* 16 KB page size */ 626#define PAGE_SIZE_16KB 0x00040000 /* 16 KB page size */
626#define PAGE_SIZE_64KB 0x00050000 /* 64 KB page size */ 627#define PAGE_SIZE_64KB 0x00050000 /* 64 KB page size */
627#define PAGE_SIZE_16MB 0x00060000 /* 16 MB page size */ 628#define PAGE_SIZE_16MB 0x00060000 /* 16 MB page size */
628#define PAGE_SIZE_64MB 0x00070000 /* 64 MB page size */ 629#define PAGE_SIZE_64MB 0x00070000 /* 64 MB page size */
630#endif
629#define CPLB_L1SRAM 0x00000020 /* 0=SRAM mapped in L1, 0=SRAM not 631#define CPLB_L1SRAM 0x00000020 /* 0=SRAM mapped in L1, 0=SRAM not
630 * mapped to L1 632 * mapped to L1
631 */ 633 */
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 9b33e7247864..c865b33eeb68 100644
--- a/arch/blackfin/include/asm/mem_init.h
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -335,6 +335,7 @@
335struct ddr_config { 335struct ddr_config {
336 u32 ddr_clk; 336 u32 ddr_clk;
337 u32 dmc_ddrctl; 337 u32 dmc_ddrctl;
338 u32 dmc_effctl;
338 u32 dmc_ddrcfg; 339 u32 dmc_ddrcfg;
339 u32 dmc_ddrtr0; 340 u32 dmc_ddrtr0;
340 u32 dmc_ddrtr1; 341 u32 dmc_ddrtr1;
@@ -348,6 +349,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
348 [0] = { 349 [0] = {
349 .ddr_clk = 125, 350 .ddr_clk = 125,
350 .dmc_ddrctl = 0x00000904, 351 .dmc_ddrctl = 0x00000904,
352 .dmc_effctl = 0x004400C0,
351 .dmc_ddrcfg = 0x00000422, 353 .dmc_ddrcfg = 0x00000422,
352 .dmc_ddrtr0 = 0x20705212, 354 .dmc_ddrtr0 = 0x20705212,
353 .dmc_ddrtr1 = 0x201003CF, 355 .dmc_ddrtr1 = 0x201003CF,
@@ -358,6 +360,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
358 [1] = { 360 [1] = {
359 .ddr_clk = 133, 361 .ddr_clk = 133,
360 .dmc_ddrctl = 0x00000904, 362 .dmc_ddrctl = 0x00000904,
363 .dmc_effctl = 0x004400C0,
361 .dmc_ddrcfg = 0x00000422, 364 .dmc_ddrcfg = 0x00000422,
362 .dmc_ddrtr0 = 0x20806313, 365 .dmc_ddrtr0 = 0x20806313,
363 .dmc_ddrtr1 = 0x2013040D, 366 .dmc_ddrtr1 = 0x2013040D,
@@ -368,6 +371,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
368 [2] = { 371 [2] = {
369 .ddr_clk = 150, 372 .ddr_clk = 150,
370 .dmc_ddrctl = 0x00000904, 373 .dmc_ddrctl = 0x00000904,
374 .dmc_effctl = 0x004400C0,
371 .dmc_ddrcfg = 0x00000422, 375 .dmc_ddrcfg = 0x00000422,
372 .dmc_ddrtr0 = 0x20A07323, 376 .dmc_ddrtr0 = 0x20A07323,
373 .dmc_ddrtr1 = 0x20160492, 377 .dmc_ddrtr1 = 0x20160492,
@@ -378,6 +382,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
378 [3] = { 382 [3] = {
379 .ddr_clk = 166, 383 .ddr_clk = 166,
380 .dmc_ddrctl = 0x00000904, 384 .dmc_ddrctl = 0x00000904,
385 .dmc_effctl = 0x004400C0,
381 .dmc_ddrcfg = 0x00000422, 386 .dmc_ddrcfg = 0x00000422,
382 .dmc_ddrtr0 = 0x20A07323, 387 .dmc_ddrtr0 = 0x20A07323,
383 .dmc_ddrtr1 = 0x2016050E, 388 .dmc_ddrtr1 = 0x2016050E,
@@ -388,6 +393,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
388 [4] = { 393 [4] = {
389 .ddr_clk = 200, 394 .ddr_clk = 200,
390 .dmc_ddrctl = 0x00000904, 395 .dmc_ddrctl = 0x00000904,
396 .dmc_effctl = 0x004400C0,
391 .dmc_ddrcfg = 0x00000422, 397 .dmc_ddrcfg = 0x00000422,
392 .dmc_ddrtr0 = 0x20a07323, 398 .dmc_ddrtr0 = 0x20a07323,
393 .dmc_ddrtr1 = 0x2016050f, 399 .dmc_ddrtr1 = 0x2016050f,
@@ -398,6 +404,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
398 [5] = { 404 [5] = {
399 .ddr_clk = 225, 405 .ddr_clk = 225,
400 .dmc_ddrctl = 0x00000904, 406 .dmc_ddrctl = 0x00000904,
407 .dmc_effctl = 0x004400C0,
401 .dmc_ddrcfg = 0x00000422, 408 .dmc_ddrcfg = 0x00000422,
402 .dmc_ddrtr0 = 0x20E0A424, 409 .dmc_ddrtr0 = 0x20E0A424,
403 .dmc_ddrtr1 = 0x302006DB, 410 .dmc_ddrtr1 = 0x302006DB,
@@ -408,6 +415,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
408 [6] = { 415 [6] = {
409 .ddr_clk = 250, 416 .ddr_clk = 250,
410 .dmc_ddrctl = 0x00000904, 417 .dmc_ddrctl = 0x00000904,
418 .dmc_effctl = 0x004400C0,
411 .dmc_ddrcfg = 0x00000422, 419 .dmc_ddrcfg = 0x00000422,
412 .dmc_ddrtr0 = 0x20E0A424, 420 .dmc_ddrtr0 = 0x20E0A424,
413 .dmc_ddrtr1 = 0x3020079E, 421 .dmc_ddrtr1 = 0x3020079E,
@@ -469,6 +477,7 @@ static inline void init_dmc(u32 dmc_clk)
469 bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2); 477 bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2);
470 bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr); 478 bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr);
471 bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1); 479 bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1);
480 bfin_write_DMC0_EFFCTL(ddr_config_table[i].dmc_effctl);
472 bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl); 481 bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl);
473 break; 482 break;
474 } 483 }
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 34e96ce02aa9..b49a53b583d5 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -30,6 +30,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
30{ 30{
31 int i_d, i_i; 31 int i_d, i_i;
32 unsigned long addr; 32 unsigned long addr;
33 unsigned long cplb_pageflags, cplb_pagesize;
33 34
34 struct cplb_entry *d_tbl = dcplb_tbl[cpu]; 35 struct cplb_entry *d_tbl = dcplb_tbl[cpu];
35 struct cplb_entry *i_tbl = icplb_tbl[cpu]; 36 struct cplb_entry *i_tbl = icplb_tbl[cpu];
@@ -49,11 +50,20 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
49 /* Cover kernel memory with 4M pages. */ 50 /* Cover kernel memory with 4M pages. */
50 addr = 0; 51 addr = 0;
51 52
52 for (; addr < memory_start; addr += 4 * 1024 * 1024) { 53#ifdef PAGE_SIZE_16MB
54 cplb_pageflags = PAGE_SIZE_16MB;
55 cplb_pagesize = SIZE_16M;
56#else
57 cplb_pageflags = PAGE_SIZE_4MB;
58 cplb_pagesize = SIZE_4M;
59#endif
60
61
62 for (; addr < memory_start; addr += cplb_pagesize) {
53 d_tbl[i_d].addr = addr; 63 d_tbl[i_d].addr = addr;
54 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; 64 d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
55 i_tbl[i_i].addr = addr; 65 i_tbl[i_i].addr = addr;
56 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; 66 i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
57 } 67 }
58 68
59#ifdef CONFIG_ROMKERNEL 69#ifdef CONFIG_ROMKERNEL
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index e854f9066cbd..79cc0f6dcdd5 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -145,7 +145,7 @@ MGR_ATTR static int dcplb_miss(int cpu)
145 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); 145 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
146 int status = bfin_read_DCPLB_STATUS(); 146 int status = bfin_read_DCPLB_STATUS();
147 int idx; 147 int idx;
148 unsigned long d_data, base, addr1, eaddr; 148 unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags;
149 149
150 nr_dcplb_miss[cpu]++; 150 nr_dcplb_miss[cpu]++;
151 if (unlikely(status & FAULT_USERSUPV)) 151 if (unlikely(status & FAULT_USERSUPV))
@@ -167,18 +167,37 @@ MGR_ATTR static int dcplb_miss(int cpu)
167 if (unlikely(d_data == 0)) 167 if (unlikely(d_data == 0))
168 return CPLB_NO_ADDR_MATCH; 168 return CPLB_NO_ADDR_MATCH;
169 169
170 addr1 = addr & ~(SIZE_4M - 1);
171 addr &= ~(SIZE_1M - 1); 170 addr &= ~(SIZE_1M - 1);
172 d_data |= PAGE_SIZE_1MB; 171 d_data |= PAGE_SIZE_1MB;
173 if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) { 172
173 /* BF60x support large than 4M CPLB page size */
174#ifdef PAGE_SIZE_16MB
175 cplb_pageflags = PAGE_SIZE_16MB;
176 cplb_pagesize = SIZE_16M;
177#else
178 cplb_pageflags = PAGE_SIZE_4MB;
179 cplb_pagesize = SIZE_4M;
180#endif
181
182find_pagesize:
183 addr1 = addr & ~(cplb_pagesize - 1);
184 if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) {
174 /* 185 /*
175 * This works because 186 * This works because
176 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB. 187 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
177 */ 188 */
178 d_data |= PAGE_SIZE_4MB; 189 d_data |= cplb_pageflags;
179 addr = addr1; 190 addr = addr1;
191 goto found_pagesize;
192 } else {
193 if (cplb_pagesize > SIZE_4M) {
194 cplb_pageflags = PAGE_SIZE_4MB;
195 cplb_pagesize = SIZE_4M;
196 goto find_pagesize;
197 }
180 } 198 }
181 199
200found_pagesize:
182#ifdef CONFIG_BF60x 201#ifdef CONFIG_BF60x
183 if ((addr >= ASYNC_BANK0_BASE) 202 if ((addr >= ASYNC_BANK0_BASE)
184 && (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)) 203 && (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c
index 404045dcc5e4..5b80d59e66e5 100644
--- a/arch/blackfin/kernel/cplbinfo.c
+++ b/arch/blackfin/kernel/cplbinfo.c
@@ -17,8 +17,13 @@
17#include <asm/cplbinit.h> 17#include <asm/cplbinit.h>
18#include <asm/blackfin.h> 18#include <asm/blackfin.h>
19 19
20static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" }; 20static char const page_strtbl[][4] = {
21#define page(flags) (((flags) & 0x30000) >> 16) 21 "1K", "4K", "1M", "4M",
22#ifdef CONFIG_BF60x
23 "16K", "64K", "16M", "64M",
24#endif
25};
26#define page(flags) (((flags) & 0x70000) >> 16)
22#define strpage(flags) page_strtbl[page(flags)] 27#define strpage(flags) page_strtbl[page(flags)]
23 28
24struct cplbinfo_data { 29struct cplbinfo_data {
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index fb96e607adcf..107b306b06f1 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -1314,7 +1314,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1314 seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid()); 1314 seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
1315 } 1315 }
1316 1316
1317 seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", 1317 seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
1318 cclk/1000000, cclk%1000000, 1318 cclk/1000000, cclk%1000000,
1319 sclk/1000000, sclk%1000000); 1319 sclk/1000000, sclk%1000000);
1320 seq_printf(m, "bogomips\t: %lu.%02lu\n" 1320 seq_printf(m, "bogomips\t: %lu.%02lu\n"
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 95114ed395ac..6a3a14bcd3a1 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -455,6 +455,7 @@ static struct platform_device bfin_async_nand_device = {
455static void bfin_plat_nand_init(void) 455static void bfin_plat_nand_init(void)
456{ 456{
457 gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat"); 457 gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
458 gpio_direction_input(BFIN_NAND_PLAT_READY);
458} 459}
459#else 460#else
460static void bfin_plat_nand_init(void) {} 461static void bfin_plat_nand_init(void) {}
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index a4fce0370c1d..755f0dc12010 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -764,7 +764,6 @@ static struct platform_device i2c_bfin_twi1_device = {
764 .num_resources = ARRAY_SIZE(bfin_twi1_resource), 764 .num_resources = ARRAY_SIZE(bfin_twi1_resource),
765 .resource = bfin_twi1_resource, 765 .resource = bfin_twi1_resource,
766}; 766};
767#endif /* CONFIG_BF542 */
768#endif /* CONFIG_I2C_BLACKFIN_TWI */ 767#endif /* CONFIG_I2C_BLACKFIN_TWI */
769 768
770#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 769#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
diff --git a/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
index 4954cf3f7e16..102ee4025ac9 100644
--- a/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
+++ b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
@@ -312,6 +312,8 @@
312#define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val) 312#define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val)
313#define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL) 313#define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL)
314#define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val) 314#define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val)
315#define bfin_read_DMC0_EFFCTL() bfin_read32(DMC0_EFFCTL)
316#define bfin_write_DMC0_EFFCTL(val) bfin_write32(DMC0_EFFCTL, val)
315#define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT) 317#define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT)
316#define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val) 318#define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val)
317#define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL) 319#define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL)
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index d266787725b4..33013dfcd3e1 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -223,13 +223,25 @@ config M5307
223 help 223 help
224 Motorola ColdFire 5307 processor support. 224 Motorola ColdFire 5307 processor support.
225 225
226config M53xx
227 bool
228
226config M532x 229config M532x
227 bool "MCF532x" 230 bool "MCF532x"
228 depends on !MMU 231 depends on !MMU
232 select M53xx
229 select HAVE_CACHE_CB 233 select HAVE_CACHE_CB
230 help 234 help
231 Freescale (Motorola) ColdFire 532x processor support. 235 Freescale (Motorola) ColdFire 532x processor support.
232 236
237config M537x
238 bool "MCF537x"
239 depends on !MMU
240 select M53xx
241 select HAVE_CACHE_CB
242 help
243 Freescale ColdFire 537x processor support.
244
233config M5407 245config M5407
234 bool "MCF5407" 246 bool "MCF5407"
235 depends on !MMU 247 depends on !MMU
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 7240584d3439..b9ab0a69561c 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -358,6 +358,13 @@ config COBRA5329
358 help 358 help
359 Support for the senTec COBRA5329 board. 359 Support for the senTec COBRA5329 board.
360 360
361config M5373EVB
362 bool "Freescale M5373EVB board support"
363 depends on M537x
364 select FREESCALE
365 help
366 Support for the Freescale M5373EVB board.
367
361config M5407C3 368config M5407C3
362 bool "Motorola M5407C3 board support" 369 bool "Motorola M5407C3 board support"
363 depends on M5407 370 depends on M5407
@@ -539,15 +546,6 @@ config ROMVEC
539 68000 type variants the vectors are at the base of the boot device 546 68000 type variants the vectors are at the base of the boot device
540 on system startup. 547 on system startup.
541 548
542config ROMVECSIZE
543 hex "Size of ROM vector region (in bytes)"
544 default "0x400"
545 depends on ROM
546 help
547 Define the size of the vector region in ROM. For most 68000
548 variants this would be 0x400 bytes in size. Set to 0 if you do
549 not want a vector region at the start of the ROM.
550
551config ROMSTART 549config ROMSTART
552 hex "Address of the base of system image in ROM" 550 hex "Address of the base of system image in ROM"
553 default "0x400" 551 default "0x400"
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 2f02acfb8edf..7f7830f2c5bc 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -45,6 +45,7 @@ cpuflags-$(CONFIG_M5441x) := $(call cc-option,-mcpu=54455,-mcfv4e)
45cpuflags-$(CONFIG_M54xx) := $(call cc-option,-mcpu=5475,-m5200) 45cpuflags-$(CONFIG_M54xx) := $(call cc-option,-mcpu=5475,-m5200)
46cpuflags-$(CONFIG_M5407) := $(call cc-option,-mcpu=5407,-m5200) 46cpuflags-$(CONFIG_M5407) := $(call cc-option,-mcpu=5407,-m5200)
47cpuflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307) 47cpuflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
48cpuflags-$(CONFIG_M537x) := $(call cc-option,-mcpu=537x,-m5307)
48cpuflags-$(CONFIG_M5307) := $(call cc-option,-mcpu=5307,-m5200) 49cpuflags-$(CONFIG_M5307) := $(call cc-option,-mcpu=5307,-m5200)
49cpuflags-$(CONFIG_M528x) := $(call cc-option,-mcpu=528x,-m5307) 50cpuflags-$(CONFIG_M528x) := $(call cc-option,-mcpu=528x,-m5307)
50cpuflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307) 51cpuflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
diff --git a/arch/m68k/include/asm/commproc.h b/arch/m68k/include/asm/commproc.h
index a73998528d26..66a36bd51aa1 100644
--- a/arch/m68k/include/asm/commproc.h
+++ b/arch/m68k/include/asm/commproc.h
@@ -480,23 +480,6 @@ typedef struct scc_enet {
480#define SICR_ENET_CLKRT ((uint)0x0000003d) 480#define SICR_ENET_CLKRT ((uint)0x0000003d)
481#endif 481#endif
482 482
483#ifdef CONFIG_RPXLITE
484/* This ENET stuff is for the MPC850 with ethernet on SCC2. Some of
485 * this may be unique to the RPX-Lite configuration.
486 * Note TENA is on Port B.
487 */
488#define PA_ENET_RXD ((ushort)0x0004)
489#define PA_ENET_TXD ((ushort)0x0008)
490#define PA_ENET_TCLK ((ushort)0x0200)
491#define PA_ENET_RCLK ((ushort)0x0800)
492#define PB_ENET_TENA ((uint)0x00002000)
493#define PC_ENET_CLSN ((ushort)0x0040)
494#define PC_ENET_RENA ((ushort)0x0080)
495
496#define SICR_ENET_MASK ((uint)0x0000ff00)
497#define SICR_ENET_CLKRT ((uint)0x00003d00)
498#endif
499
500#ifdef CONFIG_BSEIP 483#ifdef CONFIG_BSEIP
501/* This ENET stuff is for the MPC823 with ethernet on SCC2. 484/* This ENET stuff is for the MPC823 with ethernet on SCC2.
502 * This is unique to the BSE ip-Engine board. 485 * This is unique to the BSE ip-Engine board.
diff --git a/arch/m68k/include/asm/dbg.h b/arch/m68k/include/asm/dbg.h
deleted file mode 100644
index 27af3270f671..000000000000
--- a/arch/m68k/include/asm/dbg.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#define DEBUG 1
2#ifdef CONFIG_COLDFIRE
3#define BREAK asm volatile ("halt")
4#else
5#define BREAK *(volatile unsigned char *)0xdeadbee0 = 0
6#endif
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index 0ff3fc6a6d9a..429fe26e320c 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -39,7 +39,7 @@
39#define MAX_M68K_DMA_CHANNELS 4 39#define MAX_M68K_DMA_CHANNELS 4
40#elif defined(CONFIG_M5272) 40#elif defined(CONFIG_M5272)
41#define MAX_M68K_DMA_CHANNELS 1 41#define MAX_M68K_DMA_CHANNELS 1
42#elif defined(CONFIG_M532x) 42#elif defined(CONFIG_M53xx)
43#define MAX_M68K_DMA_CHANNELS 0 43#define MAX_M68K_DMA_CHANNELS 0
44#else 44#else
45#define MAX_M68K_DMA_CHANNELS 2 45#define MAX_M68K_DMA_CHANNELS 2
diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
index cd952b0a8bd3..3177ce8331d6 100644
--- a/arch/m68k/include/asm/m53xxacr.h
+++ b/arch/m68k/include/asm/m53xxacr.h
@@ -55,8 +55,8 @@
55#define CACHE_SIZE 0x2000 /* 8k of unified cache */ 55#define CACHE_SIZE 0x2000 /* 8k of unified cache */
56#define ICACHE_SIZE CACHE_SIZE 56#define ICACHE_SIZE CACHE_SIZE
57#define DCACHE_SIZE CACHE_SIZE 57#define DCACHE_SIZE CACHE_SIZE
58#elif defined(CONFIG_M532x) 58#elif defined(CONFIG_M53xx)
59#define CACHE_SIZE 0x4000 /* 32k of unified cache */ 59#define CACHE_SIZE 0x4000 /* 16k of unified cache */
60#define ICACHE_SIZE CACHE_SIZE 60#define ICACHE_SIZE CACHE_SIZE
61#define DCACHE_SIZE CACHE_SIZE 61#define DCACHE_SIZE CACHE_SIZE
62#endif 62#endif
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m53xxsim.h
index 8668e47ced0e..faa1a2133bfd 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m53xxsim.h
@@ -1,15 +1,15 @@
1/****************************************************************************/ 1/****************************************************************************/
2 2
3/* 3/*
4 * m532xsim.h -- ColdFire 5329 registers 4 * m53xxsim.h -- ColdFire 5329 registers
5 */ 5 */
6 6
7/****************************************************************************/ 7/****************************************************************************/
8#ifndef m532xsim_h 8#ifndef m53xxsim_h
9#define m532xsim_h 9#define m53xxsim_h
10/****************************************************************************/ 10/****************************************************************************/
11 11
12#define CPU_NAME "COLDFIRE(m532x)" 12#define CPU_NAME "COLDFIRE(m53xx)"
13#define CPU_INSTR_PER_JIFFY 3 13#define CPU_INSTR_PER_JIFFY 3
14#define MCF_BUSCLK (MCF_CLK / 3) 14#define MCF_BUSCLK (MCF_CLK / 3)
15 15
@@ -107,7 +107,7 @@
107/* 107/*
108 * QSPI module. 108 * QSPI module.
109 */ 109 */
110#define MCFQSPI_BASE 0xFC058000 /* Base address of QSPI */ 110#define MCFQSPI_BASE 0xFC05C000 /* Base address of QSPI */
111#define MCFQSPI_SIZE 0x40 /* Size of QSPI region */ 111#define MCFQSPI_SIZE 0x40 /* Size of QSPI region */
112 112
113#define MCFQSPI_CS0 84 113#define MCFQSPI_CS0 84
@@ -1238,4 +1238,4 @@
1238#define MCFEPORT_EPFR (0xFC094006) 1238#define MCFEPORT_EPFR (0xFC094006)
1239 1239
1240/********************************************************************/ 1240/********************************************************************/
1241#endif /* m532xsim_h */ 1241#endif /* m53xxsim_h */
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 192bbfeabf70..6d13cae44af5 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -96,8 +96,13 @@
96 */ 96 */
97#define ACR0_MODE (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \ 97#define ACR0_MODE (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
98 ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP) 98 ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
99#if defined(CONFIG_CACHE_COPYBACK)
99#define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ 100#define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
100 ACR_ENABLE+ACR_SUPER+ACR_SP) 101 ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_CP)
102#else
103#define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
104 ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_WT)
105#endif
101#define ACR2_MODE 0 106#define ACR2_MODE 0
102#define ACR3_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ 107#define ACR3_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
103 ACR_ENABLE+ACR_SUPER+ACR_SP) 108 ACR_ENABLE+ACR_SUPER+ACR_SP)
diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h
index fa1059f50dfc..c41ebf45f1d0 100644
--- a/arch/m68k/include/asm/mcfgpio.h
+++ b/arch/m68k/include/asm/mcfgpio.h
@@ -104,7 +104,7 @@ static inline void gpio_free(unsigned gpio)
104#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \ 104#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
105 defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ 105 defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
106 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 106 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
107 defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \ 107 defined(CONFIG_M53xx) || defined(CONFIG_M54xx) || \
108 defined(CONFIG_M5441x) 108 defined(CONFIG_M5441x)
109 109
110/* These parts have GPIO organized by 8 bit ports */ 110/* These parts have GPIO organized by 8 bit ports */
@@ -139,7 +139,7 @@ static inline void gpio_free(unsigned gpio)
139 139
140#if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ 140#if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
141 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 141 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
142 defined(CONFIG_M532x) || defined(CONFIG_M5441x) 142 defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
143/* 143/*
144 * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses 144 * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
145 * read-modify-write to change an output and a GPIO module which has separate 145 * read-modify-write to change an output and a GPIO module which has separate
@@ -195,7 +195,7 @@ static inline u32 __mcfgpio_ppdr(unsigned gpio)
195 return MCFSIM2_GPIO1READ; 195 return MCFSIM2_GPIO1READ;
196#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ 196#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
197 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 197 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
198 defined(CONFIG_M532x) || defined(CONFIG_M5441x) 198 defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
199#if !defined(CONFIG_M5441x) 199#if !defined(CONFIG_M5441x)
200 if (gpio < 8) 200 if (gpio < 8)
201 return MCFEPORT_EPPDR; 201 return MCFEPORT_EPPDR;
@@ -237,7 +237,7 @@ static inline u32 __mcfgpio_podr(unsigned gpio)
237 return MCFSIM2_GPIO1WRITE; 237 return MCFSIM2_GPIO1WRITE;
238#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ 238#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
239 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 239 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
240 defined(CONFIG_M532x) || defined(CONFIG_M5441x) 240 defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
241#if !defined(CONFIG_M5441x) 241#if !defined(CONFIG_M5441x)
242 if (gpio < 8) 242 if (gpio < 8)
243 return MCFEPORT_EPDR; 243 return MCFEPORT_EPDR;
@@ -279,7 +279,7 @@ static inline u32 __mcfgpio_pddr(unsigned gpio)
279 return MCFSIM2_GPIO1ENABLE; 279 return MCFSIM2_GPIO1ENABLE;
280#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ 280#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
281 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 281 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
282 defined(CONFIG_M532x) || defined(CONFIG_M5441x) 282 defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
283#if !defined(CONFIG_M5441x) 283#if !defined(CONFIG_M5441x)
284 if (gpio < 8) 284 if (gpio < 8)
285 return MCFEPORT_EPDDR; 285 return MCFEPORT_EPDDR;
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index a04fd9b2714c..bc867de8a1e9 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -36,8 +36,8 @@
36#elif defined(CONFIG_M5307) 36#elif defined(CONFIG_M5307)
37#include <asm/m5307sim.h> 37#include <asm/m5307sim.h>
38#include <asm/mcfintc.h> 38#include <asm/mcfintc.h>
39#elif defined(CONFIG_M532x) 39#elif defined(CONFIG_M53xx)
40#include <asm/m532xsim.h> 40#include <asm/m53xxsim.h>
41#elif defined(CONFIG_M5407) 41#elif defined(CONFIG_M5407)
42#include <asm/m5407sim.h> 42#include <asm/m5407sim.h>
43#include <asm/mcfintc.h> 43#include <asm/mcfintc.h>
diff --git a/arch/m68k/include/asm/mcftimer.h b/arch/m68k/include/asm/mcftimer.h
index da2fa43c2e45..089f0f150bbf 100644
--- a/arch/m68k/include/asm/mcftimer.h
+++ b/arch/m68k/include/asm/mcftimer.h
@@ -19,7 +19,7 @@
19#define MCFTIMER_TRR 0x04 /* Timer Reference (r/w) */ 19#define MCFTIMER_TRR 0x04 /* Timer Reference (r/w) */
20#define MCFTIMER_TCR 0x08 /* Timer Capture reg (r/w) */ 20#define MCFTIMER_TCR 0x08 /* Timer Capture reg (r/w) */
21#define MCFTIMER_TCN 0x0C /* Timer Counter reg (r/w) */ 21#define MCFTIMER_TCN 0x0C /* Timer Counter reg (r/w) */
22#if defined(CONFIG_M532x) || defined(CONFIG_M5441x) 22#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
23#define MCFTIMER_TER 0x03 /* Timer Event reg (r/w) */ 23#define MCFTIMER_TER 0x03 /* Timer Event reg (r/w) */
24#else 24#else
25#define MCFTIMER_TER 0x11 /* Timer Event reg (r/w) */ 25#define MCFTIMER_TER 0x11 /* Timer Event reg (r/w) */
diff --git a/arch/m68k/platform/coldfire/Makefile b/arch/m68k/platform/coldfire/Makefile
index 02591a109f8c..68f0fac60099 100644
--- a/arch/m68k/platform/coldfire/Makefile
+++ b/arch/m68k/platform/coldfire/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_M527x) += m527x.o pit.o intc-2.o reset.o
25obj-$(CONFIG_M5272) += m5272.o intc-5272.o timers.o 25obj-$(CONFIG_M5272) += m5272.o intc-5272.o timers.o
26obj-$(CONFIG_M528x) += m528x.o pit.o intc-2.o reset.o 26obj-$(CONFIG_M528x) += m528x.o pit.o intc-2.o reset.o
27obj-$(CONFIG_M5307) += m5307.o timers.o intc.o reset.o 27obj-$(CONFIG_M5307) += m5307.o timers.o intc.o reset.o
28obj-$(CONFIG_M532x) += m532x.o timers.o intc-simr.o reset.o 28obj-$(CONFIG_M53xx) += m53xx.o timers.o intc-simr.o reset.o
29obj-$(CONFIG_M5407) += m5407.o timers.o intc.o reset.o 29obj-$(CONFIG_M5407) += m5407.o timers.o intc.o reset.o
30obj-$(CONFIG_M54xx) += m54xx.o sltimers.o intc-2.o 30obj-$(CONFIG_M54xx) += m54xx.o sltimers.o intc-2.o
31obj-$(CONFIG_M5441x) += m5441x.o pit.o intc-simr.o reset.o 31obj-$(CONFIG_M5441x) += m5441x.o pit.o intc-simr.o reset.o
diff --git a/arch/m68k/platform/coldfire/m532x.c b/arch/m68k/platform/coldfire/m53xx.c
index 7951d1d43357..5286f98fbed0 100644
--- a/arch/m68k/platform/coldfire/m532x.c
+++ b/arch/m68k/platform/coldfire/m53xx.c
@@ -1,7 +1,7 @@
1/***************************************************************************/ 1/***************************************************************************/
2 2
3/* 3/*
4 * linux/arch/m68knommu/platform/532x/config.c 4 * m53xx.c -- platform support for ColdFire 53xx based boards
5 * 5 *
6 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) 6 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
7 * Copyright (C) 2000, Lineo (www.lineo.com) 7 * Copyright (C) 2000, Lineo (www.lineo.com)
@@ -118,7 +118,8 @@ static struct clk * const enable_clks[] __initconst = {
118 &__clk_0_24, /* mcfuart.0 */ 118 &__clk_0_24, /* mcfuart.0 */
119 &__clk_0_25, /* mcfuart.1 */ 119 &__clk_0_25, /* mcfuart.1 */
120 &__clk_0_26, /* mcfuart.2 */ 120 &__clk_0_26, /* mcfuart.2 */
121 121 &__clk_0_28, /* mcftmr.0 */
122 &__clk_0_29, /* mcftmr.1 */
122 &__clk_0_32, /* mcfpit.0 */ 123 &__clk_0_32, /* mcfpit.0 */
123 &__clk_0_33, /* mcfpit.1 */ 124 &__clk_0_33, /* mcfpit.1 */
124 &__clk_0_37, /* mcfeport.0 */ 125 &__clk_0_37, /* mcfeport.0 */
@@ -134,8 +135,6 @@ static struct clk * const disable_clks[] __initconst = {
134 &__clk_0_17, /* edma */ 135 &__clk_0_17, /* edma */
135 &__clk_0_22, /* mcfi2c.0 */ 136 &__clk_0_22, /* mcfi2c.0 */
136 &__clk_0_23, /* mcfqspi.0 */ 137 &__clk_0_23, /* mcfqspi.0 */
137 &__clk_0_28, /* mcftmr.0 */
138 &__clk_0_29, /* mcftmr.1 */
139 &__clk_0_30, /* mcftmr.2 */ 138 &__clk_0_30, /* mcftmr.2 */
140 &__clk_0_31, /* mcftmr.3 */ 139 &__clk_0_31, /* mcftmr.3 */
141 &__clk_0_34, /* mcfpit.2 */ 140 &__clk_0_34, /* mcfpit.2 */
@@ -153,7 +152,7 @@ static struct clk * const disable_clks[] __initconst = {
153}; 152};
154 153
155 154
156static void __init m532x_clk_init(void) 155static void __init m53xx_clk_init(void)
157{ 156{
158 unsigned i; 157 unsigned i;
159 158
@@ -169,7 +168,7 @@ static void __init m532x_clk_init(void)
169 168
170#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 169#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
171 170
172static void __init m532x_qspi_init(void) 171static void __init m53xx_qspi_init(void)
173{ 172{
174 /* setup QSPS pins for QSPI with gpio CS control */ 173 /* setup QSPS pins for QSPI with gpio CS control */
175 writew(0x01f0, MCFGPIO_PAR_QSPI); 174 writew(0x01f0, MCFGPIO_PAR_QSPI);
@@ -179,7 +178,7 @@ static void __init m532x_qspi_init(void)
179 178
180/***************************************************************************/ 179/***************************************************************************/
181 180
182static void __init m532x_uarts_init(void) 181static void __init m53xx_uarts_init(void)
183{ 182{
184 /* UART GPIO initialization */ 183 /* UART GPIO initialization */
185 writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART); 184 writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART);
@@ -187,7 +186,7 @@ static void __init m532x_uarts_init(void)
187 186
188/***************************************************************************/ 187/***************************************************************************/
189 188
190static void __init m532x_fec_init(void) 189static void __init m53xx_fec_init(void)
191{ 190{
192 u8 v; 191 u8 v;
193 192
@@ -217,11 +216,11 @@ void __init config_BSP(char *commandp, int size)
217 } 216 }
218#endif 217#endif
219 mach_sched_init = hw_timer_init; 218 mach_sched_init = hw_timer_init;
220 m532x_clk_init(); 219 m53xx_clk_init();
221 m532x_uarts_init(); 220 m53xx_uarts_init();
222 m532x_fec_init(); 221 m53xx_fec_init();
223#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 222#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
224 m532x_qspi_init(); 223 m53xx_qspi_init();
225#endif 224#endif
226 225
227#ifdef CONFIG_BDM_DISABLE 226#ifdef CONFIG_BDM_DISABLE
diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c
index 51f6d2af807f..d06068e45764 100644
--- a/arch/m68k/platform/coldfire/timers.c
+++ b/arch/m68k/platform/coldfire/timers.c
@@ -36,7 +36,7 @@
36 */ 36 */
37void coldfire_profile_init(void); 37void coldfire_profile_init(void);
38 38
39#if defined(CONFIG_M532x) || defined(CONFIG_M5441x) 39#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
40#define __raw_readtrr __raw_readl 40#define __raw_readtrr __raw_readl
41#define __raw_writetrr __raw_writel 41#define __raw_writetrr __raw_writel
42#else 42#else
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index d2b097a652d9..3649a8b150c0 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -17,7 +17,6 @@ CONFIG_MODULE_UNLOAD=y
17# CONFIG_BLK_DEV_BSG is not set 17# CONFIG_BLK_DEV_BSG is not set
18CONFIG_PARTITION_ADVANCED=y 18CONFIG_PARTITION_ADVANCED=y
19# CONFIG_EFI_PARTITION is not set 19# CONFIG_EFI_PARTITION is not set
20CONFIG_OPT_LIB_ASM=y
21CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 20CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
22CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 21CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
23CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 22CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 41cc841091b0..d52abb6812fa 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -153,7 +153,5 @@ extern void __init xilinx_pci_init(void);
153static inline void __init xilinx_pci_init(void) { return; } 153static inline void __init xilinx_pci_init(void) { return; }
154#endif 154#endif
155 155
156#include <asm-generic/pci-dma-compat.h>
157
158#endif /* __KERNEL__ */ 156#endif /* __KERNEL__ */
159#endif /* __ASM_MICROBLAZE_PCI_H */ 157#endif /* __ASM_MICROBLAZE_PCI_H */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index a1ab5f0009ef..efe59d881789 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -90,17 +90,25 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
90 90
91#else 91#else
92 92
93/* 93static inline int access_ok(int type, const void __user *addr,
94 * Address is valid if: 94 unsigned long size)
95 * - "addr", "addr + size" and "size" are all below the limit 95{
96 */ 96 if (!size)
97#define access_ok(type, addr, size) \ 97 goto ok;
98 (get_fs().seg >= (((unsigned long)(addr)) | \ 98
99 (size) | ((unsigned long)(addr) + (size)))) 99 if ((get_fs().seg < ((unsigned long)addr)) ||
100 100 (get_fs().seg < ((unsigned long)addr + size - 1))) {
101/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", 101 pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
102 type?"WRITE":"READ",addr,size,get_fs().seg)) */ 102 type ? "WRITE" : "READ ", (u32)addr, (u32)size,
103 103 (u32)get_fs().seg);
104 return 0;
105 }
106ok:
107 pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
108 type ? "WRITE" : "READ ", (u32)addr, (u32)size,
109 (u32)get_fs().seg);
110 return 1;
111}
104#endif 112#endif
105 113
106#ifdef CONFIG_MMU 114#ifdef CONFIG_MMU
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 0b2299bcb948..410398f6db55 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -37,6 +37,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
37 {"8.20.a", 0x15}, 37 {"8.20.a", 0x15},
38 {"8.20.b", 0x16}, 38 {"8.20.b", 0x16},
39 {"8.30.a", 0x17}, 39 {"8.30.a", 0x17},
40 {"8.40.a", 0x18},
41 {"8.40.b", 0x19},
40 {NULL, 0}, 42 {NULL, 0},
41}; 43};
42 44
@@ -57,6 +59,9 @@ const struct family_string_key family_string_lookup[] = {
57 {"virtex6", 0xe}, 59 {"virtex6", 0xe},
58 /* FIXME There is no key code defined for spartan2 */ 60 /* FIXME There is no key code defined for spartan2 */
59 {"spartan2", 0xf0}, 61 {"spartan2", 0xf0},
62 {"kintex7", 0x10},
63 {"artix7", 0x11},
64 {"zynq7000", 0x12},
60 {NULL, 0}, 65 {NULL, 0},
61}; 66};
62 67
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index eef84de5e8c8..fcc797feb9db 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -112,16 +112,16 @@ no_fdt_arg:
112 * copy command line directly to cmd_line placed in data section. 112 * copy command line directly to cmd_line placed in data section.
113 */ 113 */
114 beqid r5, skip /* Skip if NULL pointer */ 114 beqid r5, skip /* Skip if NULL pointer */
115 or r6, r0, r0 /* incremment */ 115 or r11, r0, r0 /* incremment */
116 ori r4, r0, cmd_line /* load address of command line */ 116 ori r4, r0, cmd_line /* load address of command line */
117 tophys(r4,r4) /* convert to phys address */ 117 tophys(r4,r4) /* convert to phys address */
118 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 118 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
119_copy_command_line: 119_copy_command_line:
120 /* r2=r5+r6 - r5 contain pointer to command line */ 120 /* r2=r5+r6 - r5 contain pointer to command line */
121 lbu r2, r5, r6 121 lbu r2, r5, r11
122 beqid r2, skip /* Skip if no data */ 122 beqid r2, skip /* Skip if no data */
123 sb r2, r4, r6 /* addr[r4+r6]= r2*/ 123 sb r2, r4, r11 /* addr[r4+r6]= r2 */
124 addik r6, r6, 1 /* increment counting */ 124 addik r11, r11, 1 /* increment counting */
125 bgtid r3, _copy_command_line /* loop for all entries */ 125 bgtid r3, _copy_command_line /* loop for all entries */
126 addik r3, r3, -1 /* decrement loop */ 126 addik r3, r3, -1 /* decrement loop */
127 addik r5, r4, 0 /* add new space for command line */ 127 addik r5, r4, 0 /* add new space for command line */
@@ -131,13 +131,13 @@ skip:
131 131
132#ifdef NOT_COMPILE 132#ifdef NOT_COMPILE
133/* save bram context */ 133/* save bram context */
134 or r6, r0, r0 /* incremment */ 134 or r11, r0, r0 /* incremment */
135 ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */ 135 ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
136 ori r3, r0, (LMB_SIZE - 4) 136 ori r3, r0, (LMB_SIZE - 4)
137_copy_bram: 137_copy_bram:
138 lw r7, r0, r6 /* r7 = r0 + r6 */ 138 lw r7, r0, r11 /* r7 = r0 + r6 */
139 sw r7, r4, r6 /* addr[r4 + r6] = r7*/ 139 sw r7, r4, r11 /* addr[r4 + r6] = r7 */
140 addik r6, r6, 4 /* increment counting */ 140 addik r11, r11, 4 /* increment counting */
141 bgtid r3, _copy_bram /* loop for all entries */ 141 bgtid r3, _copy_bram /* loop for all entries */
142 addik r3, r3, -4 /* descrement loop */ 142 addik r3, r3, -4 /* descrement loop */
143#endif 143#endif
@@ -303,8 +303,8 @@ jump_over2:
303 * the exception vectors, using a 4k real==virtual mapping. 303 * the exception vectors, using a 4k real==virtual mapping.
304 */ 304 */
305 /* Use temporary TLB_ID for LMB - clear this temporary mapping later */ 305 /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
306 ori r6, r0, MICROBLAZE_LMB_TLB_ID 306 ori r11, r0, MICROBLAZE_LMB_TLB_ID
307 mts rtlbx,r6 307 mts rtlbx,r11
308 308
309 ori r4,r0,(TLB_WR | TLB_EX) 309 ori r4,r0,(TLB_WR | TLB_EX)
310 ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) 310 ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 8778adf72bd3..d85fa3a2b0f8 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -172,4 +172,6 @@ void __init init_IRQ(void)
172 * and commits this patch. ~~gcl */ 172 * and commits this patch. ~~gcl */
173 root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops, 173 root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
174 (void *)intr_mask); 174 (void *)intr_mask);
175
176 irq_set_default_host(root_domain);
175} 177}
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a55893807274..7d1a9c8b1f3d 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -160,3 +160,8 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
160 return 0; /* MicroBlaze has no separate FPU registers */ 160 return 0; /* MicroBlaze has no separate FPU registers */
161} 161}
162#endif /* CONFIG_MMU */ 162#endif /* CONFIG_MMU */
163
164void arch_cpu_idle(void)
165{
166 local_irq_enable();
167}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 4ec137d13ad7..b38ae3acfeb4 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -404,10 +404,11 @@ asmlinkage void __init mmu_init(void)
404 404
405#if defined(CONFIG_BLK_DEV_INITRD) 405#if defined(CONFIG_BLK_DEV_INITRD)
406 /* Remove the init RAM disk from the available memory. */ 406 /* Remove the init RAM disk from the available memory. */
407/* if (initrd_start) { 407 if (initrd_start) {
408 mem_pieces_remove(&phys_avail, __pa(initrd_start), 408 unsigned long size;
409 initrd_end - initrd_start, 1); 409 size = initrd_end - initrd_start;
410 }*/ 410 memblock_reserve(virt_to_phys(initrd_start), size);
411 }
411#endif /* CONFIG_BLK_DEV_INITRD */ 412#endif /* CONFIG_BLK_DEV_INITRD */
412 413
413 /* Initialize the MMU hardware */ 414 /* Initialize the MMU hardware */
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 9ea521e4959e..bdb8ea100e73 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -30,7 +30,6 @@
30#include <linux/of.h> 30#include <linux/of.h>
31#include <linux/of_address.h> 31#include <linux/of_address.h>
32#include <linux/of_pci.h> 32#include <linux/of_pci.h>
33#include <linux/pci.h>
34#include <linux/export.h> 33#include <linux/export.h>
35 34
36#include <asm/processor.h> 35#include <asm/processor.h>
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 7dd65cfae837..d2cfe45f332b 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -17,3 +17,7 @@ obj- := $(platform-)
17obj-y += kernel/ 17obj-y += kernel/
18obj-y += mm/ 18obj-y += mm/
19obj-y += math-emu/ 19obj-y += math-emu/
20
21ifdef CONFIG_KVM
22obj-y += kvm/
23endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a90cfc702bb1..7a58ab933b20 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -304,7 +304,6 @@ config MIPS_MALTA
304 select HW_HAS_PCI 304 select HW_HAS_PCI
305 select I8253 305 select I8253
306 select I8259 306 select I8259
307 select MIPS_BOARDS_GEN
308 select MIPS_BONITO64 307 select MIPS_BONITO64
309 select MIPS_CPU_SCACHE 308 select MIPS_CPU_SCACHE
310 select PCI_GT64XXX_PCI0 309 select PCI_GT64XXX_PCI0
@@ -335,12 +334,12 @@ config MIPS_SEAD3
335 select BOOT_RAW 334 select BOOT_RAW
336 select CEVT_R4K 335 select CEVT_R4K
337 select CSRC_R4K 336 select CSRC_R4K
337 select CSRC_GIC
338 select CPU_MIPSR2_IRQ_VI 338 select CPU_MIPSR2_IRQ_VI
339 select CPU_MIPSR2_IRQ_EI 339 select CPU_MIPSR2_IRQ_EI
340 select DMA_NONCOHERENT 340 select DMA_NONCOHERENT
341 select IRQ_CPU 341 select IRQ_CPU
342 select IRQ_GIC 342 select IRQ_GIC
343 select MIPS_BOARDS_GEN
344 select MIPS_CPU_SCACHE 343 select MIPS_CPU_SCACHE
345 select MIPS_MSC 344 select MIPS_MSC
346 select SYS_HAS_CPU_MIPS32_R1 345 select SYS_HAS_CPU_MIPS32_R1
@@ -352,6 +351,7 @@ config MIPS_SEAD3
352 select SYS_SUPPORTS_BIG_ENDIAN 351 select SYS_SUPPORTS_BIG_ENDIAN
353 select SYS_SUPPORTS_LITTLE_ENDIAN 352 select SYS_SUPPORTS_LITTLE_ENDIAN
354 select SYS_SUPPORTS_SMARTMIPS 353 select SYS_SUPPORTS_SMARTMIPS
354 select SYS_SUPPORTS_MICROMIPS
355 select USB_ARCH_HAS_EHCI 355 select USB_ARCH_HAS_EHCI
356 select USB_EHCI_BIG_ENDIAN_DESC 356 select USB_EHCI_BIG_ENDIAN_DESC
357 select USB_EHCI_BIG_ENDIAN_MMIO 357 select USB_EHCI_BIG_ENDIAN_MMIO
@@ -910,6 +910,9 @@ config CEVT_GT641XX
910config CEVT_R4K 910config CEVT_R4K
911 bool 911 bool
912 912
913config CEVT_GIC
914 bool
915
913config CEVT_SB1250 916config CEVT_SB1250
914 bool 917 bool
915 918
@@ -982,9 +985,6 @@ config MIPS_MSC
982config MIPS_NILE4 985config MIPS_NILE4
983 bool 986 bool
984 987
985config MIPS_DISABLE_OBSOLETE_IDE
986 bool
987
988config SYNC_R4K 988config SYNC_R4K
989 bool 989 bool
990 990
@@ -1075,9 +1075,6 @@ config IRQ_GT641XX
1075config IRQ_GIC 1075config IRQ_GIC
1076 bool 1076 bool
1077 1077
1078config MIPS_BOARDS_GEN
1079 bool
1080
1081config PCI_GT64XXX_PCI0 1078config PCI_GT64XXX_PCI0
1082 bool 1079 bool
1083 1080
@@ -1147,7 +1144,7 @@ config BOOT_ELF32
1147 1144
1148config MIPS_L1_CACHE_SHIFT 1145config MIPS_L1_CACHE_SHIFT
1149 int 1146 int
1150 default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL 1147 default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
1151 default "6" if MIPS_CPU_SCACHE 1148 default "6" if MIPS_CPU_SCACHE
1152 default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON 1149 default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
1153 default "5" 1150 default "5"
@@ -1236,6 +1233,7 @@ config CPU_MIPS32_R2
1236 select CPU_HAS_PREFETCH 1233 select CPU_HAS_PREFETCH
1237 select CPU_SUPPORTS_32BIT_KERNEL 1234 select CPU_SUPPORTS_32BIT_KERNEL
1238 select CPU_SUPPORTS_HIGHMEM 1235 select CPU_SUPPORTS_HIGHMEM
1236 select HAVE_KVM
1239 help 1237 help
1240 Choose this option to build a kernel for release 2 or later of the 1238 Choose this option to build a kernel for release 2 or later of the
1241 MIPS32 architecture. Most modern embedded systems with a 32-bit 1239 MIPS32 architecture. Most modern embedded systems with a 32-bit
@@ -1736,6 +1734,20 @@ config 64BIT
1736 1734
1737endchoice 1735endchoice
1738 1736
1737config KVM_GUEST
1738 bool "KVM Guest Kernel"
1739 help
1740 Select this option if building a guest kernel for KVM (Trap & Emulate) mode
1741
1742config KVM_HOST_FREQ
1743 int "KVM Host Processor Frequency (MHz)"
1744 depends on KVM_GUEST
1745 default 500
1746 help
1747 Select this option if building a guest kernel for KVM to skip
1748 RTC emulation when determining guest CPU Frequency. Instead, the guest
1749 processor frequency is automatically derived from the host frequency.
1750
1739choice 1751choice
1740 prompt "Kernel page size" 1752 prompt "Kernel page size"
1741 default PAGE_SIZE_4KB 1753 default PAGE_SIZE_4KB
@@ -1811,6 +1823,15 @@ config FORCE_MAX_ZONEORDER
1811 The page size is not necessarily 4KB. Keep this in mind 1823 The page size is not necessarily 4KB. Keep this in mind
1812 when choosing a value for this option. 1824 when choosing a value for this option.
1813 1825
1826config CEVT_GIC
1827 bool "Use GIC global counter for clock events"
1828 depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
1829 help
1830 Use the GIC global counter for the clock events. The R4K clock
1831 event driver is always present, so if the platform ends up not
1832 detecting a GIC, it will fall back to the R4K timer for the
1833 generation of clock events.
1834
1814config BOARD_SCACHE 1835config BOARD_SCACHE
1815 bool 1836 bool
1816 1837
@@ -2016,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS
2016 depends on CPU_SB1 && CPU_SB1_PASS_2 2037 depends on CPU_SB1 && CPU_SB1_PASS_2
2017 default y 2038 default y
2018 2039
2040
2019config 64BIT_PHYS_ADDR 2041config 64BIT_PHYS_ADDR
2020 bool 2042 bool
2021 2043
@@ -2034,6 +2056,13 @@ config CPU_HAS_SMARTMIPS
2034 you don't know you probably don't have SmartMIPS and should say N 2056 you don't know you probably don't have SmartMIPS and should say N
2035 here. 2057 here.
2036 2058
2059config CPU_MICROMIPS
2060 depends on SYS_SUPPORTS_MICROMIPS
2061 bool "Build kernel using microMIPS ISA"
2062 help
2063 When this option is enabled the kernel will be built using the
2064 microMIPS ISA
2065
2037config CPU_HAS_WB 2066config CPU_HAS_WB
2038 bool 2067 bool
2039 2068
@@ -2096,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM
2096config SYS_SUPPORTS_SMARTMIPS 2125config SYS_SUPPORTS_SMARTMIPS
2097 bool 2126 bool
2098 2127
2128config SYS_SUPPORTS_MICROMIPS
2129 bool
2130
2099config ARCH_FLATMEM_ENABLE 2131config ARCH_FLATMEM_ENABLE
2100 def_bool y 2132 def_bool y
2101 depends on !NUMA && !CPU_LOONGSON2 2133 depends on !NUMA && !CPU_LOONGSON2
@@ -2556,3 +2588,5 @@ source "security/Kconfig"
2556source "crypto/Kconfig" 2588source "crypto/Kconfig"
2557 2589
2558source "lib/Kconfig" 2590source "lib/Kconfig"
2591
2592source "arch/mips/kvm/Kconfig"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 6f7978f95090..dd58a04ef4bc 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*e
114cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) 114cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
115 115
116cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) 116cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips)
117cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
117 118
118cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ 119cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
119 -fno-omit-frame-pointer 120 -fno-omit-frame-pointer
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index c8862bdc2ff2..7032ac7ecd1b 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -31,7 +31,6 @@ config MIPS_DB1000
31 select ALCHEMY_GPIOINT_AU1000 31 select ALCHEMY_GPIOINT_AU1000
32 select DMA_NONCOHERENT 32 select DMA_NONCOHERENT
33 select HW_HAS_PCI 33 select HW_HAS_PCI
34 select MIPS_DISABLE_OBSOLETE_IDE
35 select SYS_SUPPORTS_BIG_ENDIAN 34 select SYS_SUPPORTS_BIG_ENDIAN
36 select SYS_SUPPORTS_LITTLE_ENDIAN 35 select SYS_SUPPORTS_LITTLE_ENDIAN
37 select SYS_HAS_EARLY_PRINTK 36 select SYS_HAS_EARLY_PRINTK
@@ -41,7 +40,6 @@ config MIPS_DB1235
41 select ARCH_REQUIRE_GPIOLIB 40 select ARCH_REQUIRE_GPIOLIB
42 select HW_HAS_PCI 41 select HW_HAS_PCI
43 select DMA_COHERENT 42 select DMA_COHERENT
44 select MIPS_DISABLE_OBSOLETE_IDE
45 select SYS_SUPPORTS_LITTLE_ENDIAN 43 select SYS_SUPPORTS_LITTLE_ENDIAN
46 select SYS_HAS_EARLY_PRINTK 44 select SYS_HAS_EARLY_PRINTK
47 45
@@ -57,7 +55,6 @@ config MIPS_GPR
57 select ALCHEMY_GPIOINT_AU1000 55 select ALCHEMY_GPIOINT_AU1000
58 select HW_HAS_PCI 56 select HW_HAS_PCI
59 select DMA_NONCOHERENT 57 select DMA_NONCOHERENT
60 select MIPS_DISABLE_OBSOLETE_IDE
61 select SYS_SUPPORTS_LITTLE_ENDIAN 58 select SYS_SUPPORTS_LITTLE_ENDIAN
62 select SYS_HAS_EARLY_PRINTK 59 select SYS_HAS_EARLY_PRINTK
63 60
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
index fa1bdd1aea15..b3afcdd8d77a 100644
--- a/arch/mips/alchemy/Platform
+++ b/arch/mips/alchemy/Platform
@@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
5 5
6 6
7# 7#
8# AMD Alchemy Pb1100 eval board 8# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
9#
10platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/
11load-$(CONFIG_MIPS_PB1100) += 0xffffffff80100000
12
13#
14# AMD Alchemy Pb1500 eval board
15#
16platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/
17load-$(CONFIG_MIPS_PB1500) += 0xffffffff80100000
18
19#
20# AMD Alchemy Pb1550 eval board
21#
22platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/
23load-$(CONFIG_MIPS_PB1550) += 0xffffffff80100000
24
25#
26# AMD Alchemy Db1000/Db1500/Db1100 eval boards
27# 9#
28platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/ 10platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/
29cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00 11cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
30load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000 12load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000
31 13
32# 14#
33# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards 15# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
34# 16#
35platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/ 17platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/
36cflags-$(CONFIG_MIPS_DB1235) += -I$(srctree)/arch/mips/include/asm/mach-db1x00 18cflags-$(CONFIG_MIPS_DB1235) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
index 28abfeef09d6..92dfa481205b 100644
--- a/arch/mips/ar7/memory.c
+++ b/arch/mips/ar7/memory.c
@@ -30,7 +30,6 @@
30#include <asm/sections.h> 30#include <asm/sections.h>
31 31
32#include <asm/mach-ar7/ar7.h> 32#include <asm/mach-ar7/ar7.h>
33#include <asm/mips-boards/prom.h>
34 33
35static int __init memsize(void) 34static int __init memsize(void)
36{ 35{
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index d5b3c9057018..a0233a2c1988 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -51,20 +51,6 @@ static void ath79_halt(void)
51 cpu_wait(); 51 cpu_wait();
52} 52}
53 53
54static void __init ath79_detect_mem_size(void)
55{
56 unsigned long size;
57
58 for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
59 size <<= 1) {
60 if (!memcmp(ath79_detect_mem_size,
61 ath79_detect_mem_size + size, 1024))
62 break;
63 }
64
65 add_memory_region(0, size, BOOT_MEM_RAM);
66}
67
68static void __init ath79_detect_sys_type(void) 54static void __init ath79_detect_sys_type(void)
69{ 55{
70 char *chip = "????"; 56 char *chip = "????";
@@ -212,7 +198,7 @@ void __init plat_mem_setup(void)
212 AR71XX_DDR_CTRL_SIZE); 198 AR71XX_DDR_CTRL_SIZE);
213 199
214 ath79_detect_sys_type(); 200 ath79_detect_sys_type();
215 ath79_detect_mem_size(); 201 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
216 ath79_clocks_init(); 202 ath79_clocks_init();
217 203
218 _machine_restart = ath79_restart; 204 _machine_restart = ath79_restart;
diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig
index d03e8799d1cf..5639662fd503 100644
--- a/arch/mips/bcm63xx/Kconfig
+++ b/arch/mips/bcm63xx/Kconfig
@@ -25,6 +25,10 @@ config BCM63XX_CPU_6358
25 bool "support 6358 CPU" 25 bool "support 6358 CPU"
26 select HW_HAS_PCI 26 select HW_HAS_PCI
27 27
28config BCM63XX_CPU_6362
29 bool "support 6362 CPU"
30 select HW_HAS_PCI
31
28config BCM63XX_CPU_6368 32config BCM63XX_CPU_6368
29 bool "support 6368 CPU" 33 bool "support 6368 CPU"
30 select HW_HAS_PCI 34 select HW_HAS_PCI
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 9aa7d44898ed..a9505c4867e8 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -726,11 +726,11 @@ void __init board_prom_init(void)
726 u32 val; 726 u32 val;
727 727
728 /* read base address of boot chip select (0) 728 /* read base address of boot chip select (0)
729 * 6328 does not have MPI but boots from a fixed address 729 * 6328/6362 do not have MPI but boot from a fixed address
730 */ 730 */
731 if (BCMCPU_IS_6328()) 731 if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
732 val = 0x18000000; 732 val = 0x18000000;
733 else { 733 } else {
734 val = bcm_mpi_readl(MPI_CSBASE_REG(0)); 734 val = bcm_mpi_readl(MPI_CSBASE_REG(0));
735 val &= MPI_CSBASE_BASE_MASK; 735 val &= MPI_CSBASE_BASE_MASK;
736 } 736 }
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index b9e948d59430..c726a97fc798 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -15,7 +15,13 @@
15#include <bcm63xx_io.h> 15#include <bcm63xx_io.h>
16#include <bcm63xx_regs.h> 16#include <bcm63xx_regs.h>
17#include <bcm63xx_reset.h> 17#include <bcm63xx_reset.h>
18#include <bcm63xx_clk.h> 18
19struct clk {
20 void (*set)(struct clk *, int);
21 unsigned int rate;
22 unsigned int usage;
23 int id;
24};
19 25
20static DEFINE_MUTEX(clocks_mutex); 26static DEFINE_MUTEX(clocks_mutex);
21 27
@@ -119,11 +125,18 @@ static struct clk clk_ephy = {
119 */ 125 */
120static void enetsw_set(struct clk *clk, int enable) 126static void enetsw_set(struct clk *clk, int enable)
121{ 127{
122 if (!BCMCPU_IS_6368()) 128 if (BCMCPU_IS_6328())
129 bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
130 else if (BCMCPU_IS_6362())
131 bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
132 else if (BCMCPU_IS_6368())
133 bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
134 CKCTL_6368_SWPKT_USB_EN |
135 CKCTL_6368_SWPKT_SAR_EN,
136 enable);
137 else
123 return; 138 return;
124 bcm_hwclock_set(CKCTL_6368_ROBOSW_EN | 139
125 CKCTL_6368_SWPKT_USB_EN |
126 CKCTL_6368_SWPKT_SAR_EN, enable);
127 if (enable) { 140 if (enable) {
128 /* reset switch core afer clock change */ 141 /* reset switch core afer clock change */
129 bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1); 142 bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
@@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable)
160 bcm_hwclock_set(CKCTL_6328_USBH_EN, enable); 173 bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
161 else if (BCMCPU_IS_6348()) 174 else if (BCMCPU_IS_6348())
162 bcm_hwclock_set(CKCTL_6348_USBH_EN, enable); 175 bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
176 else if (BCMCPU_IS_6362())
177 bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
163 else if (BCMCPU_IS_6368()) 178 else if (BCMCPU_IS_6368())
164 bcm_hwclock_set(CKCTL_6368_USBH_EN, enable); 179 bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
165} 180}
@@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable)
175{ 190{
176 if (BCMCPU_IS_6328()) 191 if (BCMCPU_IS_6328())
177 bcm_hwclock_set(CKCTL_6328_USBD_EN, enable); 192 bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
193 else if (BCMCPU_IS_6362())
194 bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
178 else if (BCMCPU_IS_6368()) 195 else if (BCMCPU_IS_6368())
179 bcm_hwclock_set(CKCTL_6368_USBD_EN, enable); 196 bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
180} 197}
@@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable)
196 mask = CKCTL_6348_SPI_EN; 213 mask = CKCTL_6348_SPI_EN;
197 else if (BCMCPU_IS_6358()) 214 else if (BCMCPU_IS_6358())
198 mask = CKCTL_6358_SPI_EN; 215 mask = CKCTL_6358_SPI_EN;
216 else if (BCMCPU_IS_6362())
217 mask = CKCTL_6362_SPI_EN;
199 else 218 else
200 /* BCMCPU_IS_6368 */ 219 /* BCMCPU_IS_6368 */
201 mask = CKCTL_6368_SPI_EN; 220 mask = CKCTL_6368_SPI_EN;
@@ -236,7 +255,10 @@ static struct clk clk_xtm = {
236 */ 255 */
237static void ipsec_set(struct clk *clk, int enable) 256static void ipsec_set(struct clk *clk, int enable)
238{ 257{
239 bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable); 258 if (BCMCPU_IS_6362())
259 bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
260 else if (BCMCPU_IS_6368())
261 bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
240} 262}
241 263
242static struct clk clk_ipsec = { 264static struct clk clk_ipsec = {
@@ -249,7 +271,10 @@ static struct clk clk_ipsec = {
249 271
250static void pcie_set(struct clk *clk, int enable) 272static void pcie_set(struct clk *clk, int enable)
251{ 273{
252 bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable); 274 if (BCMCPU_IS_6328())
275 bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
276 else if (BCMCPU_IS_6362())
277 bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
253} 278}
254 279
255static struct clk clk_pcie = { 280static struct clk clk_pcie = {
@@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id)
315 return &clk_periph; 340 return &clk_periph;
316 if (BCMCPU_IS_6358() && !strcmp(id, "pcm")) 341 if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
317 return &clk_pcm; 342 return &clk_pcm;
318 if (BCMCPU_IS_6368() && !strcmp(id, "ipsec")) 343 if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
319 return &clk_ipsec; 344 return &clk_ipsec;
320 if (BCMCPU_IS_6328() && !strcmp(id, "pcie")) 345 if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
321 return &clk_pcie; 346 return &clk_pcie;
322 return ERR_PTR(-ENOENT); 347 return ERR_PTR(-ENOENT);
323} 348}
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index a7afb289b15a..79fe32df5e96 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -25,7 +25,7 @@ const int *bcm63xx_irqs;
25EXPORT_SYMBOL(bcm63xx_irqs); 25EXPORT_SYMBOL(bcm63xx_irqs);
26 26
27static u16 bcm63xx_cpu_id; 27static u16 bcm63xx_cpu_id;
28static u16 bcm63xx_cpu_rev; 28static u8 bcm63xx_cpu_rev;
29static unsigned int bcm63xx_cpu_freq; 29static unsigned int bcm63xx_cpu_freq;
30static unsigned int bcm63xx_memory_size; 30static unsigned int bcm63xx_memory_size;
31 31
@@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = {
71 71
72}; 72};
73 73
74static const unsigned long bcm6362_regs_base[] = {
75 __GEN_CPU_REGS_TABLE(6362)
76};
77
78static const int bcm6362_irqs[] = {
79 __GEN_CPU_IRQ_TABLE(6362)
80
81};
82
74static const unsigned long bcm6368_regs_base[] = { 83static const unsigned long bcm6368_regs_base[] = {
75 __GEN_CPU_REGS_TABLE(6368) 84 __GEN_CPU_REGS_TABLE(6368)
76}; 85};
@@ -87,7 +96,7 @@ u16 __bcm63xx_get_cpu_id(void)
87 96
88EXPORT_SYMBOL(__bcm63xx_get_cpu_id); 97EXPORT_SYMBOL(__bcm63xx_get_cpu_id);
89 98
90u16 bcm63xx_get_cpu_rev(void) 99u8 bcm63xx_get_cpu_rev(void)
91{ 100{
92 return bcm63xx_cpu_rev; 101 return bcm63xx_cpu_rev;
93} 102}
@@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void)
169 return (16 * 1000000 * n1 * n2) / m1; 178 return (16 * 1000000 * n1 * n2) / m1;
170 } 179 }
171 180
181 case BCM6362_CPU_ID:
182 {
183 unsigned int tmp, mips_pll_fcvo;
184
185 tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
186 mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
187 >> STRAPBUS_6362_FCVO_SHIFT;
188 switch (mips_pll_fcvo) {
189 case 0x03:
190 case 0x0b:
191 case 0x13:
192 case 0x1b:
193 return 240000000;
194 case 0x04:
195 case 0x0c:
196 case 0x14:
197 case 0x1c:
198 return 160000000;
199 case 0x05:
200 case 0x0e:
201 case 0x16:
202 case 0x1e:
203 case 0x1f:
204 return 400000000;
205 case 0x06:
206 return 440000000;
207 case 0x07:
208 case 0x17:
209 return 384000000;
210 case 0x15:
211 case 0x1d:
212 return 200000000;
213 default:
214 return 320000000;
215 }
216 }
172 case BCM6368_CPU_ID: 217 case BCM6368_CPU_ID:
173 { 218 {
174 unsigned int tmp, p1, p2, ndiv, m1; 219 unsigned int tmp, p1, p2, ndiv, m1;
@@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void)
205 unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0; 250 unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
206 u32 val; 251 u32 val;
207 252
208 if (BCMCPU_IS_6328()) 253 if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
209 return bcm_ddr_readl(DDR_CSEND_REG) << 24; 254 return bcm_ddr_readl(DDR_CSEND_REG) << 24;
210 255
211 if (BCMCPU_IS_6345()) { 256 if (BCMCPU_IS_6345()) {
@@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void)
240 285
241void __init bcm63xx_cpu_init(void) 286void __init bcm63xx_cpu_init(void)
242{ 287{
243 unsigned int tmp, expected_cpu_id; 288 unsigned int tmp;
244 struct cpuinfo_mips *c = &current_cpu_data; 289 struct cpuinfo_mips *c = &current_cpu_data;
245 unsigned int cpu = smp_processor_id(); 290 unsigned int cpu = smp_processor_id();
291 u32 chipid_reg;
246 292
247 /* soc registers location depends on cpu type */ 293 /* soc registers location depends on cpu type */
248 expected_cpu_id = 0; 294 chipid_reg = 0;
249 295
250 switch (c->cputype) { 296 switch (c->cputype) {
251 case CPU_BMIPS3300: 297 case CPU_BMIPS3300:
252 if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) { 298 if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
253 expected_cpu_id = BCM6348_CPU_ID;
254 bcm63xx_regs_base = bcm6348_regs_base;
255 bcm63xx_irqs = bcm6348_irqs;
256 } else {
257 __cpu_name[cpu] = "Broadcom BCM6338"; 299 __cpu_name[cpu] = "Broadcom BCM6338";
258 expected_cpu_id = BCM6338_CPU_ID; 300 /* fall-through */
259 bcm63xx_regs_base = bcm6338_regs_base;
260 bcm63xx_irqs = bcm6338_irqs;
261 }
262 break;
263 case CPU_BMIPS32: 301 case CPU_BMIPS32:
264 expected_cpu_id = BCM6345_CPU_ID; 302 chipid_reg = BCM_6345_PERF_BASE;
265 bcm63xx_regs_base = bcm6345_regs_base;
266 bcm63xx_irqs = bcm6345_irqs;
267 break; 303 break;
268 case CPU_BMIPS4350: 304 case CPU_BMIPS4350:
269 if ((read_c0_prid() & 0xf0) == 0x10) { 305 if ((read_c0_prid() & 0xf0) == 0x10)
270 expected_cpu_id = BCM6358_CPU_ID; 306 chipid_reg = BCM_6345_PERF_BASE;
271 bcm63xx_regs_base = bcm6358_regs_base; 307 else
272 bcm63xx_irqs = bcm6358_irqs; 308 chipid_reg = BCM_6368_PERF_BASE;
273 } else {
274 /* all newer chips have the same chip id location */
275 u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
276
277 switch (chip_id) {
278 case BCM6328_CPU_ID:
279 expected_cpu_id = BCM6328_CPU_ID;
280 bcm63xx_regs_base = bcm6328_regs_base;
281 bcm63xx_irqs = bcm6328_irqs;
282 break;
283 case BCM6368_CPU_ID:
284 expected_cpu_id = BCM6368_CPU_ID;
285 bcm63xx_regs_base = bcm6368_regs_base;
286 bcm63xx_irqs = bcm6368_irqs;
287 break;
288 }
289 }
290 break; 309 break;
291 } 310 }
292 311
@@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void)
294 * really early to panic, but delaying panic would not help since we 313 * really early to panic, but delaying panic would not help since we
295 * will never get any working console 314 * will never get any working console
296 */ 315 */
297 if (!expected_cpu_id) 316 if (!chipid_reg)
298 panic("unsupported Broadcom CPU"); 317 panic("unsupported Broadcom CPU");
299 318
300 /* 319 /* read out CPU type */
301 * bcm63xx_regs_base is set, we can access soc registers 320 tmp = bcm_readl(chipid_reg);
302 */
303
304 /* double check CPU type */
305 tmp = bcm_perf_readl(PERF_REV_REG);
306 bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT; 321 bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
307 bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT; 322 bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
308 323
309 if (bcm63xx_cpu_id != expected_cpu_id) 324 switch (bcm63xx_cpu_id) {
310 panic("bcm63xx CPU id mismatch"); 325 case BCM6328_CPU_ID:
326 bcm63xx_regs_base = bcm6328_regs_base;
327 bcm63xx_irqs = bcm6328_irqs;
328 break;
329 case BCM6338_CPU_ID:
330 bcm63xx_regs_base = bcm6338_regs_base;
331 bcm63xx_irqs = bcm6338_irqs;
332 break;
333 case BCM6345_CPU_ID:
334 bcm63xx_regs_base = bcm6345_regs_base;
335 bcm63xx_irqs = bcm6345_irqs;
336 break;
337 case BCM6348_CPU_ID:
338 bcm63xx_regs_base = bcm6348_regs_base;
339 bcm63xx_irqs = bcm6348_irqs;
340 break;
341 case BCM6358_CPU_ID:
342 bcm63xx_regs_base = bcm6358_regs_base;
343 bcm63xx_irqs = bcm6358_irqs;
344 break;
345 case BCM6362_CPU_ID:
346 bcm63xx_regs_base = bcm6362_regs_base;
347 bcm63xx_irqs = bcm6362_irqs;
348 break;
349 case BCM6368_CPU_ID:
350 bcm63xx_regs_base = bcm6368_regs_base;
351 bcm63xx_irqs = bcm6368_irqs;
352 break;
353 default:
354 panic("unsupported broadcom CPU %x", bcm63xx_cpu_id);
355 break;
356 }
311 357
312 bcm63xx_cpu_freq = detect_cpu_clock(); 358 bcm63xx_cpu_freq = detect_cpu_clock();
313 bcm63xx_memory_size = detect_memory_size(); 359 bcm63xx_memory_size = detect_memory_size();
diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c
index 58371c7deac2..588d1ec622e4 100644
--- a/arch/mips/bcm63xx/dev-flash.c
+++ b/arch/mips/bcm63xx/dev-flash.c
@@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void)
77 return BCM63XX_FLASH_TYPE_PARALLEL; 77 return BCM63XX_FLASH_TYPE_PARALLEL;
78 else 78 else
79 return BCM63XX_FLASH_TYPE_SERIAL; 79 return BCM63XX_FLASH_TYPE_SERIAL;
80 case BCM6362_CPU_ID:
81 val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
82 if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
83 return BCM63XX_FLASH_TYPE_SERIAL;
84 else
85 return BCM63XX_FLASH_TYPE_NAND;
80 case BCM6368_CPU_ID: 86 case BCM6368_CPU_ID:
81 val = bcm_gpio_readl(GPIO_STRAPBUS_REG); 87 val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
82 switch (val & STRAPBUS_6368_BOOT_SEL_MASK) { 88 switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e97fd60e92ef..3065bb61820d 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -22,10 +22,6 @@
22/* 22/*
23 * register offsets 23 * register offsets
24 */ 24 */
25static const unsigned long bcm6338_regs_spi[] = {
26 __GEN_SPI_REGS_TABLE(6338)
27};
28
29static const unsigned long bcm6348_regs_spi[] = { 25static const unsigned long bcm6348_regs_spi[] = {
30 __GEN_SPI_REGS_TABLE(6348) 26 __GEN_SPI_REGS_TABLE(6348)
31}; 27};
@@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = {
34 __GEN_SPI_REGS_TABLE(6358) 30 __GEN_SPI_REGS_TABLE(6358)
35}; 31};
36 32
37static const unsigned long bcm6368_regs_spi[] = {
38 __GEN_SPI_REGS_TABLE(6368)
39};
40
41const unsigned long *bcm63xx_regs_spi; 33const unsigned long *bcm63xx_regs_spi;
42EXPORT_SYMBOL(bcm63xx_regs_spi); 34EXPORT_SYMBOL(bcm63xx_regs_spi);
43 35
44static __init void bcm63xx_spi_regs_init(void) 36static __init void bcm63xx_spi_regs_init(void)
45{ 37{
46 if (BCMCPU_IS_6338()) 38 if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
47 bcm63xx_regs_spi = bcm6338_regs_spi;
48 if (BCMCPU_IS_6348())
49 bcm63xx_regs_spi = bcm6348_regs_spi; 39 bcm63xx_regs_spi = bcm6348_regs_spi;
50 if (BCMCPU_IS_6358()) 40 if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
51 bcm63xx_regs_spi = bcm6358_regs_spi; 41 bcm63xx_regs_spi = bcm6358_regs_spi;
52 if (BCMCPU_IS_6368())
53 bcm63xx_regs_spi = bcm6368_regs_spi;
54} 42}
55#else 43#else
56static __init void bcm63xx_spi_regs_init(void) { } 44static __init void bcm63xx_spi_regs_init(void) { }
@@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void)
93 spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI); 81 spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
94 82
95 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { 83 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
96 spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1; 84 spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
97 spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; 85 spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
98 spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT; 86 spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
99 spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH; 87 spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
100 } 88 }
101 89
102 if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { 90 if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
103 spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; 91 spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
104 spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; 92 spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
105 spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; 93 spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index da24c2bd9b7c..c0ab3887f42e 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
82#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6358 82#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6358
83#define ext_irq_cfg_reg2 0 83#define ext_irq_cfg_reg2 0
84#endif 84#endif
85#ifdef CONFIG_BCM63XX_CPU_6362
86#define irq_stat_reg PERF_IRQSTAT_6362_REG
87#define irq_mask_reg PERF_IRQMASK_6362_REG
88#define irq_bits 64
89#define is_ext_irq_cascaded 1
90#define ext_irq_start (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
91#define ext_irq_end (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
92#define ext_irq_count 4
93#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6362
94#define ext_irq_cfg_reg2 0
95#endif
85#ifdef CONFIG_BCM63XX_CPU_6368 96#ifdef CONFIG_BCM63XX_CPU_6368
86#define irq_stat_reg PERF_IRQSTAT_6368_REG 97#define irq_stat_reg PERF_IRQSTAT_6368_REG
87#define irq_mask_reg PERF_IRQMASK_6368_REG 98#define irq_mask_reg PERF_IRQMASK_6368_REG
@@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void)
170 ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE; 181 ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
171 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358; 182 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
172 break; 183 break;
184 case BCM6362_CPU_ID:
185 irq_stat_addr += PERF_IRQSTAT_6362_REG;
186 irq_mask_addr += PERF_IRQMASK_6362_REG;
187 irq_bits = 64;
188 ext_irq_count = 4;
189 is_ext_irq_cascaded = 1;
190 ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
191 ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
192 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
193 break;
173 case BCM6368_CPU_ID: 194 case BCM6368_CPU_ID:
174 irq_stat_addr += PERF_IRQSTAT_6368_REG; 195 irq_stat_addr += PERF_IRQSTAT_6368_REG;
175 irq_mask_addr += PERF_IRQMASK_6368_REG; 196 irq_mask_addr += PERF_IRQMASK_6368_REG;
@@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d,
458 case BCM6338_CPU_ID: 479 case BCM6338_CPU_ID:
459 case BCM6345_CPU_ID: 480 case BCM6345_CPU_ID:
460 case BCM6358_CPU_ID: 481 case BCM6358_CPU_ID:
482 case BCM6362_CPU_ID:
461 case BCM6368_CPU_ID: 483 case BCM6368_CPU_ID:
462 if (levelsense) 484 if (levelsense)
463 reg |= EXTIRQ_CFG_LEVELSENSE(irq); 485 reg |= EXTIRQ_CFG_LEVELSENSE(irq);
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 10eaff458071..fd698087fbfd 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -36,6 +36,8 @@ void __init prom_init(void)
36 mask = CKCTL_6348_ALL_SAFE_EN; 36 mask = CKCTL_6348_ALL_SAFE_EN;
37 else if (BCMCPU_IS_6358()) 37 else if (BCMCPU_IS_6358())
38 mask = CKCTL_6358_ALL_SAFE_EN; 38 mask = CKCTL_6358_ALL_SAFE_EN;
39 else if (BCMCPU_IS_6362())
40 mask = CKCTL_6362_ALL_SAFE_EN;
39 else if (BCMCPU_IS_6368()) 41 else if (BCMCPU_IS_6368())
40 mask = CKCTL_6368_ALL_SAFE_EN; 42 mask = CKCTL_6368_ALL_SAFE_EN;
41 else 43 else
diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c
index 68a31bb90cbf..317931c6cf58 100644
--- a/arch/mips/bcm63xx/reset.c
+++ b/arch/mips/bcm63xx/reset.c
@@ -85,6 +85,20 @@
85#define BCM6358_RESET_PCIE 0 85#define BCM6358_RESET_PCIE 0
86#define BCM6358_RESET_PCIE_EXT 0 86#define BCM6358_RESET_PCIE_EXT 0
87 87
88#define BCM6362_RESET_SPI SOFTRESET_6362_SPI_MASK
89#define BCM6362_RESET_ENET 0
90#define BCM6362_RESET_USBH SOFTRESET_6362_USBH_MASK
91#define BCM6362_RESET_USBD SOFTRESET_6362_USBS_MASK
92#define BCM6362_RESET_DSL 0
93#define BCM6362_RESET_SAR SOFTRESET_6362_SAR_MASK
94#define BCM6362_RESET_EPHY SOFTRESET_6362_EPHY_MASK
95#define BCM6362_RESET_ENETSW SOFTRESET_6362_ENETSW_MASK
96#define BCM6362_RESET_PCM SOFTRESET_6362_PCM_MASK
97#define BCM6362_RESET_MPI 0
98#define BCM6362_RESET_PCIE (SOFTRESET_6362_PCIE_MASK | \
99 SOFTRESET_6362_PCIE_CORE_MASK)
100#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK
101
88#define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK 102#define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK
89#define BCM6368_RESET_ENET 0 103#define BCM6368_RESET_ENET 0
90#define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK 104#define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK
@@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = {
119 __GEN_RESET_BITS_TABLE(6358) 133 __GEN_RESET_BITS_TABLE(6358)
120}; 134};
121 135
136static const u32 bcm6362_reset_bits[] = {
137 __GEN_RESET_BITS_TABLE(6362)
138};
139
122static const u32 bcm6368_reset_bits[] = { 140static const u32 bcm6368_reset_bits[] = {
123 __GEN_RESET_BITS_TABLE(6368) 141 __GEN_RESET_BITS_TABLE(6368)
124}; 142};
@@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void)
140 } else if (BCMCPU_IS_6358()) { 158 } else if (BCMCPU_IS_6358()) {
141 reset_reg = PERF_SOFTRESET_6358_REG; 159 reset_reg = PERF_SOFTRESET_6358_REG;
142 bcm63xx_reset_bits = bcm6358_reset_bits; 160 bcm63xx_reset_bits = bcm6358_reset_bits;
161 } else if (BCMCPU_IS_6362()) {
162 reset_reg = PERF_SOFTRESET_6362_REG;
163 bcm63xx_reset_bits = bcm6362_reset_bits;
143 } else if (BCMCPU_IS_6368()) { 164 } else if (BCMCPU_IS_6368()) {
144 reset_reg = PERF_SOFTRESET_6368_REG; 165 reset_reg = PERF_SOFTRESET_6368_REG;
145 bcm63xx_reset_bits = bcm6368_reset_bits; 166 bcm63xx_reset_bits = bcm6368_reset_bits;
@@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = {
182#define reset_reg PERF_SOFTRESET_6358_REG 203#define reset_reg PERF_SOFTRESET_6358_REG
183#endif 204#endif
184 205
206#ifdef CONFIG_BCM63XX_CPU_6362
207static const u32 bcm63xx_reset_bits[] = {
208 __GEN_RESET_BITS_TABLE(6362)
209};
210#define reset_reg PERF_SOFTRESET_6362_REG
211#endif
212
185#ifdef CONFIG_BCM63XX_CPU_6368 213#ifdef CONFIG_BCM63XX_CPU_6368
186static const u32 bcm63xx_reset_bits[] = { 214static const u32 bcm63xx_reset_bits[] = {
187 __GEN_RESET_BITS_TABLE(6368) 215 __GEN_RESET_BITS_TABLE(6368)
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 35e18e98beb9..24a24445db64 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void)
83 case BCM6358_CPU_ID: 83 case BCM6358_CPU_ID:
84 perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358; 84 perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
85 break; 85 break;
86 case BCM6362_CPU_ID:
87 perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
88 break;
86 } 89 }
87 90
88 for (i = 0; i < 2; i++) { 91 for (i = 0; i < 2; i++) {
@@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p)
126const char *get_system_type(void) 129const char *get_system_type(void)
127{ 130{
128 static char buf[128]; 131 static char buf[128];
129 snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)", 132 snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
130 board_get_name(), 133 board_get_name(),
131 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev()); 134 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
132 return buf; 135 return buf;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 156aa6143e11..a22f06a6f7ca 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d,
1032 if (!octeon_irq_virq_in_range(virq)) 1032 if (!octeon_irq_virq_in_range(virq))
1033 return -EINVAL; 1033 return -EINVAL;
1034 1034
1035 hw += gpiod->base_hwirq; 1035 line = (hw + gpiod->base_hwirq) >> 6;
1036 line = hw >> 6; 1036 bit = (hw + gpiod->base_hwirq) & 63;
1037 bit = hw & 63;
1038 if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) 1037 if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
1039 return -EINVAL; 1038 return -EINVAL;
1040 1039
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index cd732e5b4fd5..ce1d3eeeb737 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y 2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y 3CONFIG_CPU_MIPS32_R2=y
4CONFIG_MIPS_MT_SMP=y 4CONFIG_MIPS_MT_SMP=y
5CONFIG_NO_HZ=y
6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_HZ_100=y 5CONFIG_HZ_100=y
8CONFIG_EXPERIMENTAL=y
9CONFIG_SYSVIPC=y 6CONFIG_SYSVIPC=y
7CONFIG_NO_HZ=y
8CONFIG_HIGH_RES_TIMERS=y
10CONFIG_LOG_BUF_SHIFT=15 9CONFIG_LOG_BUF_SHIFT=15
11CONFIG_SYSFS_DEPRECATED_V2=y
12CONFIG_RELAY=y
13CONFIG_NAMESPACES=y 10CONFIG_NAMESPACES=y
14CONFIG_UTS_NS=y 11CONFIG_RELAY=y
15CONFIG_IPC_NS=y
16CONFIG_PID_NS=y
17# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
18CONFIG_EXPERT=y 12CONFIG_EXPERT=y
19# CONFIG_SYSCTL_SYSCALL is not set
20# CONFIG_COMPAT_BRK is not set 13# CONFIG_COMPAT_BRK is not set
21CONFIG_SLAB=y 14CONFIG_SLAB=y
22CONFIG_MODULES=y 15CONFIG_MODULES=y
23CONFIG_MODULE_UNLOAD=y 16CONFIG_MODULE_UNLOAD=y
24CONFIG_MODVERSIONS=y 17CONFIG_MODVERSIONS=y
25CONFIG_MODULE_SRCVERSION_ALL=y 18CONFIG_MODULE_SRCVERSION_ALL=y
26# CONFIG_BLK_DEV_BSG is not set
27CONFIG_PCI=y 19CONFIG_PCI=y
28CONFIG_PM=y
29CONFIG_PACKET=y 20CONFIG_PACKET=y
30CONFIG_UNIX=y 21CONFIG_UNIX=y
31CONFIG_XFRM_USER=m 22CONFIG_XFRM_USER=m
@@ -41,8 +32,6 @@ CONFIG_IP_PNP=y
41CONFIG_IP_PNP_DHCP=y 32CONFIG_IP_PNP_DHCP=y
42CONFIG_IP_PNP_BOOTP=y 33CONFIG_IP_PNP_BOOTP=y
43CONFIG_NET_IPIP=m 34CONFIG_NET_IPIP=m
44CONFIG_NET_IPGRE=m
45CONFIG_NET_IPGRE_BROADCAST=y
46CONFIG_IP_MROUTE=y 35CONFIG_IP_MROUTE=y
47CONFIG_IP_PIMSM_V1=y 36CONFIG_IP_PIMSM_V1=y
48CONFIG_IP_PIMSM_V2=y 37CONFIG_IP_PIMSM_V2=y
@@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y
65CONFIG_IPV6_PIMSM_V2=y 54CONFIG_IPV6_PIMSM_V2=y
66CONFIG_NETWORK_SECMARK=y 55CONFIG_NETWORK_SECMARK=y
67CONFIG_NETFILTER=y 56CONFIG_NETFILTER=y
68CONFIG_NETFILTER_NETLINK_QUEUE=m
69CONFIG_NF_CONNTRACK=m 57CONFIG_NF_CONNTRACK=m
70CONFIG_NF_CONNTRACK_SECMARK=y 58CONFIG_NF_CONNTRACK_SECMARK=y
71CONFIG_NF_CONNTRACK_EVENTS=y 59CONFIG_NF_CONNTRACK_EVENTS=y
@@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m
136CONFIG_IP_VS_SH=m 124CONFIG_IP_VS_SH=m
137CONFIG_IP_VS_SED=m 125CONFIG_IP_VS_SED=m
138CONFIG_IP_VS_NQ=m 126CONFIG_IP_VS_NQ=m
139CONFIG_IP_VS_FTP=m
140CONFIG_NF_CONNTRACK_IPV4=m 127CONFIG_NF_CONNTRACK_IPV4=m
141CONFIG_IP_NF_QUEUE=m 128CONFIG_IP_NF_QUEUE=m
142CONFIG_IP_NF_IPTABLES=m 129CONFIG_IP_NF_IPTABLES=m
143CONFIG_IP_NF_MATCH_ADDRTYPE=m
144CONFIG_IP_NF_MATCH_AH=m 130CONFIG_IP_NF_MATCH_AH=m
145CONFIG_IP_NF_MATCH_ECN=m 131CONFIG_IP_NF_MATCH_ECN=m
146CONFIG_IP_NF_MATCH_TTL=m 132CONFIG_IP_NF_MATCH_TTL=m
147CONFIG_IP_NF_FILTER=m 133CONFIG_IP_NF_FILTER=m
148CONFIG_IP_NF_TARGET_REJECT=m 134CONFIG_IP_NF_TARGET_REJECT=m
149CONFIG_IP_NF_TARGET_LOG=m
150CONFIG_IP_NF_TARGET_ULOG=m 135CONFIG_IP_NF_TARGET_ULOG=m
151CONFIG_NF_NAT=m
152CONFIG_IP_NF_TARGET_MASQUERADE=m
153CONFIG_IP_NF_TARGET_NETMAP=m
154CONFIG_IP_NF_TARGET_REDIRECT=m
155CONFIG_NF_NAT_SNMP_BASIC=m
156CONFIG_IP_NF_MANGLE=m 136CONFIG_IP_NF_MANGLE=m
157CONFIG_IP_NF_TARGET_CLUSTERIP=m 137CONFIG_IP_NF_TARGET_CLUSTERIP=m
158CONFIG_IP_NF_TARGET_ECN=m 138CONFIG_IP_NF_TARGET_ECN=m
@@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m
162CONFIG_IP_NF_ARPFILTER=m 142CONFIG_IP_NF_ARPFILTER=m
163CONFIG_IP_NF_ARP_MANGLE=m 143CONFIG_IP_NF_ARP_MANGLE=m
164CONFIG_NF_CONNTRACK_IPV6=m 144CONFIG_NF_CONNTRACK_IPV6=m
165CONFIG_IP6_NF_QUEUE=m
166CONFIG_IP6_NF_IPTABLES=m
167CONFIG_IP6_NF_MATCH_AH=m 145CONFIG_IP6_NF_MATCH_AH=m
168CONFIG_IP6_NF_MATCH_EUI64=m 146CONFIG_IP6_NF_MATCH_EUI64=m
169CONFIG_IP6_NF_MATCH_FRAG=m 147CONFIG_IP6_NF_MATCH_FRAG=m
@@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
173CONFIG_IP6_NF_MATCH_MH=m 151CONFIG_IP6_NF_MATCH_MH=m
174CONFIG_IP6_NF_MATCH_RT=m 152CONFIG_IP6_NF_MATCH_RT=m
175CONFIG_IP6_NF_TARGET_HL=m 153CONFIG_IP6_NF_TARGET_HL=m
176CONFIG_IP6_NF_TARGET_LOG=m
177CONFIG_IP6_NF_FILTER=m 154CONFIG_IP6_NF_FILTER=m
178CONFIG_IP6_NF_TARGET_REJECT=m 155CONFIG_IP6_NF_TARGET_REJECT=m
179CONFIG_IP6_NF_MANGLE=m 156CONFIG_IP6_NF_MANGLE=m
@@ -247,12 +224,10 @@ CONFIG_MAC80211=m
247CONFIG_MAC80211_RC_PID=y 224CONFIG_MAC80211_RC_PID=y
248CONFIG_MAC80211_RC_DEFAULT_PID=y 225CONFIG_MAC80211_RC_DEFAULT_PID=y
249CONFIG_MAC80211_MESH=y 226CONFIG_MAC80211_MESH=y
250CONFIG_MAC80211_LEDS=y
251CONFIG_RFKILL=m 227CONFIG_RFKILL=m
252CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 228CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
253CONFIG_CONNECTOR=m 229CONFIG_CONNECTOR=m
254CONFIG_MTD=y 230CONFIG_MTD=y
255CONFIG_MTD_PARTITIONS=y
256CONFIG_MTD_CHAR=y 231CONFIG_MTD_CHAR=y
257CONFIG_MTD_BLOCK=y 232CONFIG_MTD_BLOCK=y
258CONFIG_MTD_OOPS=m 233CONFIG_MTD_OOPS=m
@@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m
271CONFIG_BLK_DEV_RAM=y 246CONFIG_BLK_DEV_RAM=y
272CONFIG_CDROM_PKTCDVD=m 247CONFIG_CDROM_PKTCDVD=m
273CONFIG_ATA_OVER_ETH=m 248CONFIG_ATA_OVER_ETH=m
274# CONFIG_MISC_DEVICES is not set
275CONFIG_IDE=y 249CONFIG_IDE=y
276CONFIG_BLK_DEV_IDECD=y 250CONFIG_BLK_DEV_IDECD=y
277CONFIG_IDE_GENERIC=y 251CONFIG_IDE_GENERIC=y
@@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m
317CONFIG_DM_ZERO=m 291CONFIG_DM_ZERO=m
318CONFIG_DM_MULTIPATH=m 292CONFIG_DM_MULTIPATH=m
319CONFIG_NETDEVICES=y 293CONFIG_NETDEVICES=y
320CONFIG_IFB=m
321CONFIG_DUMMY=m
322CONFIG_BONDING=m 294CONFIG_BONDING=m
323CONFIG_MACVLAN=m 295CONFIG_DUMMY=m
324CONFIG_EQUALIZER=m 296CONFIG_EQUALIZER=m
297CONFIG_IFB=m
298CONFIG_MACVLAN=m
325CONFIG_TUN=m 299CONFIG_TUN=m
326CONFIG_VETH=m 300CONFIG_VETH=m
301# CONFIG_NET_VENDOR_3COM is not set
302CONFIG_PCNET32=y
303CONFIG_CHELSIO_T3=m
304CONFIG_AX88796=m
305CONFIG_NETXEN_NIC=m
306CONFIG_TC35815=m
327CONFIG_MARVELL_PHY=m 307CONFIG_MARVELL_PHY=m
328CONFIG_DAVICOM_PHY=m 308CONFIG_DAVICOM_PHY=m
329CONFIG_QSEMI_PHY=m 309CONFIG_QSEMI_PHY=m
@@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m
334CONFIG_BROADCOM_PHY=m 314CONFIG_BROADCOM_PHY=m
335CONFIG_ICPLUS_PHY=m 315CONFIG_ICPLUS_PHY=m
336CONFIG_REALTEK_PHY=m 316CONFIG_REALTEK_PHY=m
337CONFIG_MDIO_BITBANG=m
338CONFIG_NET_ETHERNET=y
339CONFIG_AX88796=m
340CONFIG_NET_PCI=y
341CONFIG_PCNET32=y
342CONFIG_TC35815=m
343CONFIG_CHELSIO_T3=m
344CONFIG_NETXEN_NIC=m
345CONFIG_ATMEL=m 317CONFIG_ATMEL=m
346CONFIG_PCI_ATMEL=m 318CONFIG_PCI_ATMEL=m
347CONFIG_PRISM54=m 319CONFIG_PRISM54=m
@@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m
352CONFIG_HOSTAP_PCI=m 324CONFIG_HOSTAP_PCI=m
353CONFIG_IPW2100=m 325CONFIG_IPW2100=m
354CONFIG_IPW2100_MONITOR=y 326CONFIG_IPW2100_MONITOR=y
355CONFIG_IPW2200=m
356CONFIG_IPW2200_MONITOR=y
357CONFIG_IPW2200_PROMISCUOUS=y
358CONFIG_IPW2200_QOS=y
359CONFIG_LIBERTAS=m 327CONFIG_LIBERTAS=m
360CONFIG_HERMES=m
361CONFIG_PLX_HERMES=m
362CONFIG_TMD_HERMES=m
363CONFIG_NORTEL_HERMES=m
364# CONFIG_INPUT_KEYBOARD is not set 328# CONFIG_INPUT_KEYBOARD is not set
365# CONFIG_INPUT_MOUSE is not set 329# CONFIG_INPUT_MOUSE is not set
366# CONFIG_SERIO_I8042 is not set 330# CONFIG_SERIO_I8042 is not set
@@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y
373# CONFIG_VGA_CONSOLE is not set 337# CONFIG_VGA_CONSOLE is not set
374CONFIG_FRAMEBUFFER_CONSOLE=y 338CONFIG_FRAMEBUFFER_CONSOLE=y
375CONFIG_HID=m 339CONFIG_HID=m
376CONFIG_LEDS_CLASS=y
377CONFIG_LEDS_TRIGGER_TIMER=m
378CONFIG_LEDS_TRIGGER_IDE_DISK=y
379CONFIG_LEDS_TRIGGER_HEARTBEAT=m
380CONFIG_LEDS_TRIGGER_BACKLIGHT=m
381CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
382CONFIG_RTC_CLASS=y 340CONFIG_RTC_CLASS=y
383CONFIG_RTC_DRV_CMOS=y 341CONFIG_RTC_DRV_CMOS=y
384CONFIG_UIO=m 342CONFIG_UIO=m
@@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y
398CONFIG_XFS_POSIX_ACL=y 356CONFIG_XFS_POSIX_ACL=y
399CONFIG_QUOTA=y 357CONFIG_QUOTA=y
400CONFIG_QFMT_V2=y 358CONFIG_QFMT_V2=y
401CONFIG_AUTOFS_FS=y
402CONFIG_FUSE_FS=m 359CONFIG_FUSE_FS=m
403CONFIG_ISO9660_FS=m 360CONFIG_ISO9660_FS=m
404CONFIG_JOLIET=y 361CONFIG_JOLIET=y
@@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m
425CONFIG_SYSV_FS=m 382CONFIG_SYSV_FS=m
426CONFIG_UFS_FS=m 383CONFIG_UFS_FS=m
427CONFIG_NFS_FS=y 384CONFIG_NFS_FS=y
428CONFIG_NFS_V3=y
429CONFIG_ROOT_NFS=y 385CONFIG_ROOT_NFS=y
430CONFIG_NFSD=y 386CONFIG_NFSD=y
431CONFIG_NFSD_V3=y 387CONFIG_NFSD_V3=y
@@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m
466CONFIG_NLS_ISO8859_15=m 422CONFIG_NLS_ISO8859_15=m
467CONFIG_NLS_KOI8_R=m 423CONFIG_NLS_KOI8_R=m
468CONFIG_NLS_KOI8_U=m 424CONFIG_NLS_KOI8_U=m
469# CONFIG_RCU_CPU_STALL_DETECTOR is not set
470CONFIG_CRYPTO_NULL=m 425CONFIG_CRYPTO_NULL=m
471CONFIG_CRYPTO_CRYPTD=m 426CONFIG_CRYPTO_CRYPTD=m
472CONFIG_CRYPTO_LRW=m 427CONFIG_CRYPTO_LRW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 000000000000..341bb47204d6
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -0,0 +1,456 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_PAGE_SIZE_16KB=y
5CONFIG_MIPS_MT_SMP=y
6CONFIG_HZ_100=y
7CONFIG_SYSVIPC=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_LOG_BUF_SHIFT=15
11CONFIG_NAMESPACES=y
12CONFIG_RELAY=y
13CONFIG_EXPERT=y
14CONFIG_PERF_EVENTS=y
15# CONFIG_COMPAT_BRK is not set
16CONFIG_SLAB=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_MODVERSIONS=y
20CONFIG_MODULE_SRCVERSION_ALL=y
21CONFIG_PCI=y
22CONFIG_PACKET=y
23CONFIG_UNIX=y
24CONFIG_XFRM_USER=m
25CONFIG_NET_KEY=y
26CONFIG_NET_KEY_MIGRATE=y
27CONFIG_INET=y
28CONFIG_IP_MULTICAST=y
29CONFIG_IP_ADVANCED_ROUTER=y
30CONFIG_IP_MULTIPLE_TABLES=y
31CONFIG_IP_ROUTE_MULTIPATH=y
32CONFIG_IP_ROUTE_VERBOSE=y
33CONFIG_IP_PNP=y
34CONFIG_IP_PNP_DHCP=y
35CONFIG_IP_PNP_BOOTP=y
36CONFIG_NET_IPIP=m
37CONFIG_IP_MROUTE=y
38CONFIG_IP_PIMSM_V1=y
39CONFIG_IP_PIMSM_V2=y
40CONFIG_SYN_COOKIES=y
41CONFIG_INET_AH=m
42CONFIG_INET_ESP=m
43CONFIG_INET_IPCOMP=m
44CONFIG_INET_XFRM_MODE_TRANSPORT=m
45CONFIG_INET_XFRM_MODE_TUNNEL=m
46CONFIG_TCP_MD5SIG=y
47CONFIG_IPV6_PRIVACY=y
48CONFIG_IPV6_ROUTER_PREF=y
49CONFIG_IPV6_ROUTE_INFO=y
50CONFIG_IPV6_OPTIMISTIC_DAD=y
51CONFIG_INET6_AH=m
52CONFIG_INET6_ESP=m
53CONFIG_INET6_IPCOMP=m
54CONFIG_IPV6_TUNNEL=m
55CONFIG_IPV6_MROUTE=y
56CONFIG_IPV6_PIMSM_V2=y
57CONFIG_NETWORK_SECMARK=y
58CONFIG_NETFILTER=y
59CONFIG_NF_CONNTRACK=m
60CONFIG_NF_CONNTRACK_SECMARK=y
61CONFIG_NF_CONNTRACK_EVENTS=y
62CONFIG_NF_CT_PROTO_DCCP=m
63CONFIG_NF_CT_PROTO_UDPLITE=m
64CONFIG_NF_CONNTRACK_AMANDA=m
65CONFIG_NF_CONNTRACK_FTP=m
66CONFIG_NF_CONNTRACK_H323=m
67CONFIG_NF_CONNTRACK_IRC=m
68CONFIG_NF_CONNTRACK_PPTP=m
69CONFIG_NF_CONNTRACK_SANE=m
70CONFIG_NF_CONNTRACK_SIP=m
71CONFIG_NF_CONNTRACK_TFTP=m
72CONFIG_NF_CT_NETLINK=m
73CONFIG_NETFILTER_TPROXY=m
74CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
75CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
76CONFIG_NETFILTER_XT_TARGET_MARK=m
77CONFIG_NETFILTER_XT_TARGET_NFLOG=m
78CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
79CONFIG_NETFILTER_XT_TARGET_TPROXY=m
80CONFIG_NETFILTER_XT_TARGET_TRACE=m
81CONFIG_NETFILTER_XT_TARGET_SECMARK=m
82CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
83CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
84CONFIG_NETFILTER_XT_MATCH_COMMENT=m
85CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
86CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
87CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
88CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
89CONFIG_NETFILTER_XT_MATCH_DCCP=m
90CONFIG_NETFILTER_XT_MATCH_ESP=m
91CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
92CONFIG_NETFILTER_XT_MATCH_HELPER=m
93CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
94CONFIG_NETFILTER_XT_MATCH_LENGTH=m
95CONFIG_NETFILTER_XT_MATCH_LIMIT=m
96CONFIG_NETFILTER_XT_MATCH_MAC=m
97CONFIG_NETFILTER_XT_MATCH_MARK=m
98CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
99CONFIG_NETFILTER_XT_MATCH_OWNER=m
100CONFIG_NETFILTER_XT_MATCH_POLICY=m
101CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
102CONFIG_NETFILTER_XT_MATCH_QUOTA=m
103CONFIG_NETFILTER_XT_MATCH_RATEEST=m
104CONFIG_NETFILTER_XT_MATCH_REALM=m
105CONFIG_NETFILTER_XT_MATCH_RECENT=m
106CONFIG_NETFILTER_XT_MATCH_SOCKET=m
107CONFIG_NETFILTER_XT_MATCH_STATE=m
108CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
109CONFIG_NETFILTER_XT_MATCH_STRING=m
110CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
111CONFIG_NETFILTER_XT_MATCH_TIME=m
112CONFIG_NETFILTER_XT_MATCH_U32=m
113CONFIG_IP_VS=m
114CONFIG_IP_VS_IPV6=y
115CONFIG_IP_VS_PROTO_TCP=y
116CONFIG_IP_VS_PROTO_UDP=y
117CONFIG_IP_VS_PROTO_ESP=y
118CONFIG_IP_VS_PROTO_AH=y
119CONFIG_IP_VS_RR=m
120CONFIG_IP_VS_WRR=m
121CONFIG_IP_VS_LC=m
122CONFIG_IP_VS_WLC=m
123CONFIG_IP_VS_LBLC=m
124CONFIG_IP_VS_LBLCR=m
125CONFIG_IP_VS_DH=m
126CONFIG_IP_VS_SH=m
127CONFIG_IP_VS_SED=m
128CONFIG_IP_VS_NQ=m
129CONFIG_NF_CONNTRACK_IPV4=m
130CONFIG_IP_NF_QUEUE=m
131CONFIG_IP_NF_IPTABLES=m
132CONFIG_IP_NF_MATCH_AH=m
133CONFIG_IP_NF_MATCH_ECN=m
134CONFIG_IP_NF_MATCH_TTL=m
135CONFIG_IP_NF_FILTER=m
136CONFIG_IP_NF_TARGET_REJECT=m
137CONFIG_IP_NF_TARGET_ULOG=m
138CONFIG_IP_NF_MANGLE=m
139CONFIG_IP_NF_TARGET_CLUSTERIP=m
140CONFIG_IP_NF_TARGET_ECN=m
141CONFIG_IP_NF_TARGET_TTL=m
142CONFIG_IP_NF_RAW=m
143CONFIG_IP_NF_ARPTABLES=m
144CONFIG_IP_NF_ARPFILTER=m
145CONFIG_IP_NF_ARP_MANGLE=m
146CONFIG_NF_CONNTRACK_IPV6=m
147CONFIG_IP6_NF_MATCH_AH=m
148CONFIG_IP6_NF_MATCH_EUI64=m
149CONFIG_IP6_NF_MATCH_FRAG=m
150CONFIG_IP6_NF_MATCH_OPTS=m
151CONFIG_IP6_NF_MATCH_HL=m
152CONFIG_IP6_NF_MATCH_IPV6HEADER=m
153CONFIG_IP6_NF_MATCH_MH=m
154CONFIG_IP6_NF_MATCH_RT=m
155CONFIG_IP6_NF_TARGET_HL=m
156CONFIG_IP6_NF_FILTER=m
157CONFIG_IP6_NF_TARGET_REJECT=m
158CONFIG_IP6_NF_MANGLE=m
159CONFIG_IP6_NF_RAW=m
160CONFIG_BRIDGE_NF_EBTABLES=m
161CONFIG_BRIDGE_EBT_BROUTE=m
162CONFIG_BRIDGE_EBT_T_FILTER=m
163CONFIG_BRIDGE_EBT_T_NAT=m
164CONFIG_BRIDGE_EBT_802_3=m
165CONFIG_BRIDGE_EBT_AMONG=m
166CONFIG_BRIDGE_EBT_ARP=m
167CONFIG_BRIDGE_EBT_IP=m
168CONFIG_BRIDGE_EBT_IP6=m
169CONFIG_BRIDGE_EBT_LIMIT=m
170CONFIG_BRIDGE_EBT_MARK=m
171CONFIG_BRIDGE_EBT_PKTTYPE=m
172CONFIG_BRIDGE_EBT_STP=m
173CONFIG_BRIDGE_EBT_VLAN=m
174CONFIG_BRIDGE_EBT_ARPREPLY=m
175CONFIG_BRIDGE_EBT_DNAT=m
176CONFIG_BRIDGE_EBT_MARK_T=m
177CONFIG_BRIDGE_EBT_REDIRECT=m
178CONFIG_BRIDGE_EBT_SNAT=m
179CONFIG_BRIDGE_EBT_LOG=m
180CONFIG_BRIDGE_EBT_ULOG=m
181CONFIG_BRIDGE_EBT_NFLOG=m
182CONFIG_IP_SCTP=m
183CONFIG_BRIDGE=m
184CONFIG_VLAN_8021Q=m
185CONFIG_VLAN_8021Q_GVRP=y
186CONFIG_ATALK=m
187CONFIG_DEV_APPLETALK=m
188CONFIG_IPDDP=m
189CONFIG_IPDDP_ENCAP=y
190CONFIG_IPDDP_DECAP=y
191CONFIG_PHONET=m
192CONFIG_NET_SCHED=y
193CONFIG_NET_SCH_CBQ=m
194CONFIG_NET_SCH_HTB=m
195CONFIG_NET_SCH_HFSC=m
196CONFIG_NET_SCH_PRIO=m
197CONFIG_NET_SCH_RED=m
198CONFIG_NET_SCH_SFQ=m
199CONFIG_NET_SCH_TEQL=m
200CONFIG_NET_SCH_TBF=m
201CONFIG_NET_SCH_GRED=m
202CONFIG_NET_SCH_DSMARK=m
203CONFIG_NET_SCH_NETEM=m
204CONFIG_NET_SCH_INGRESS=m
205CONFIG_NET_CLS_BASIC=m
206CONFIG_NET_CLS_TCINDEX=m
207CONFIG_NET_CLS_ROUTE4=m
208CONFIG_NET_CLS_FW=m
209CONFIG_NET_CLS_U32=m
210CONFIG_NET_CLS_RSVP=m
211CONFIG_NET_CLS_RSVP6=m
212CONFIG_NET_CLS_FLOW=m
213CONFIG_NET_CLS_ACT=y
214CONFIG_NET_ACT_POLICE=y
215CONFIG_NET_ACT_GACT=m
216CONFIG_GACT_PROB=y
217CONFIG_NET_ACT_MIRRED=m
218CONFIG_NET_ACT_IPT=m
219CONFIG_NET_ACT_NAT=m
220CONFIG_NET_ACT_PEDIT=m
221CONFIG_NET_ACT_SIMP=m
222CONFIG_NET_ACT_SKBEDIT=m
223CONFIG_NET_CLS_IND=y
224CONFIG_CFG80211=m
225CONFIG_MAC80211=m
226CONFIG_MAC80211_RC_PID=y
227CONFIG_MAC80211_RC_DEFAULT_PID=y
228CONFIG_MAC80211_MESH=y
229CONFIG_RFKILL=m
230CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
231CONFIG_CONNECTOR=m
232CONFIG_MTD=y
233CONFIG_MTD_CHAR=y
234CONFIG_MTD_BLOCK=y
235CONFIG_MTD_OOPS=m
236CONFIG_MTD_CFI=y
237CONFIG_MTD_CFI_INTELEXT=y
238CONFIG_MTD_CFI_AMDSTD=y
239CONFIG_MTD_CFI_STAA=y
240CONFIG_MTD_PHYSMAP=y
241CONFIG_MTD_UBI=m
242CONFIG_MTD_UBI_GLUEBI=m
243CONFIG_BLK_DEV_FD=m
244CONFIG_BLK_DEV_UMEM=m
245CONFIG_BLK_DEV_LOOP=m
246CONFIG_BLK_DEV_CRYPTOLOOP=m
247CONFIG_BLK_DEV_NBD=m
248CONFIG_BLK_DEV_RAM=y
249CONFIG_CDROM_PKTCDVD=m
250CONFIG_ATA_OVER_ETH=m
251CONFIG_IDE=y
252CONFIG_BLK_DEV_IDECD=y
253CONFIG_IDE_GENERIC=y
254CONFIG_BLK_DEV_GENERIC=y
255CONFIG_BLK_DEV_PIIX=y
256CONFIG_BLK_DEV_IT8213=m
257CONFIG_BLK_DEV_TC86C001=m
258CONFIG_RAID_ATTRS=m
259CONFIG_SCSI=m
260CONFIG_SCSI_TGT=m
261CONFIG_BLK_DEV_SD=m
262CONFIG_CHR_DEV_ST=m
263CONFIG_CHR_DEV_OSST=m
264CONFIG_BLK_DEV_SR=m
265CONFIG_BLK_DEV_SR_VENDOR=y
266CONFIG_CHR_DEV_SG=m
267CONFIG_SCSI_MULTI_LUN=y
268CONFIG_SCSI_CONSTANTS=y
269CONFIG_SCSI_LOGGING=y
270CONFIG_SCSI_SCAN_ASYNC=y
271CONFIG_SCSI_FC_ATTRS=m
272CONFIG_ISCSI_TCP=m
273CONFIG_BLK_DEV_3W_XXXX_RAID=m
274CONFIG_SCSI_3W_9XXX=m
275CONFIG_SCSI_ACARD=m
276CONFIG_SCSI_AACRAID=m
277CONFIG_SCSI_AIC7XXX=m
278CONFIG_AIC7XXX_RESET_DELAY_MS=15000
279# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
280CONFIG_MD=y
281CONFIG_BLK_DEV_MD=m
282CONFIG_MD_LINEAR=m
283CONFIG_MD_RAID0=m
284CONFIG_MD_RAID1=m
285CONFIG_MD_RAID10=m
286CONFIG_MD_RAID456=m
287CONFIG_MD_MULTIPATH=m
288CONFIG_MD_FAULTY=m
289CONFIG_BLK_DEV_DM=m
290CONFIG_DM_CRYPT=m
291CONFIG_DM_SNAPSHOT=m
292CONFIG_DM_MIRROR=m
293CONFIG_DM_ZERO=m
294CONFIG_DM_MULTIPATH=m
295CONFIG_NETDEVICES=y
296CONFIG_BONDING=m
297CONFIG_DUMMY=m
298CONFIG_EQUALIZER=m
299CONFIG_IFB=m
300CONFIG_MACVLAN=m
301CONFIG_TUN=m
302CONFIG_VETH=m
303CONFIG_PCNET32=y
304CONFIG_CHELSIO_T3=m
305CONFIG_AX88796=m
306CONFIG_NETXEN_NIC=m
307CONFIG_TC35815=m
308CONFIG_MARVELL_PHY=m
309CONFIG_DAVICOM_PHY=m
310CONFIG_QSEMI_PHY=m
311CONFIG_LXT_PHY=m
312CONFIG_CICADA_PHY=m
313CONFIG_VITESSE_PHY=m
314CONFIG_SMSC_PHY=m
315CONFIG_BROADCOM_PHY=m
316CONFIG_ICPLUS_PHY=m
317CONFIG_REALTEK_PHY=m
318CONFIG_ATMEL=m
319CONFIG_PCI_ATMEL=m
320CONFIG_PRISM54=m
321CONFIG_HOSTAP=m
322CONFIG_HOSTAP_FIRMWARE=y
323CONFIG_HOSTAP_FIRMWARE_NVRAM=y
324CONFIG_HOSTAP_PLX=m
325CONFIG_HOSTAP_PCI=m
326CONFIG_IPW2100=m
327CONFIG_IPW2100_MONITOR=y
328CONFIG_LIBERTAS=m
329# CONFIG_INPUT_KEYBOARD is not set
330# CONFIG_INPUT_MOUSE is not set
331# CONFIG_SERIO_I8042 is not set
332CONFIG_VT_HW_CONSOLE_BINDING=y
333CONFIG_SERIAL_8250=y
334CONFIG_SERIAL_8250_CONSOLE=y
335# CONFIG_HWMON is not set
336CONFIG_FB=y
337CONFIG_FB_CIRRUS=y
338# CONFIG_VGA_CONSOLE is not set
339CONFIG_FRAMEBUFFER_CONSOLE=y
340CONFIG_HID=m
341CONFIG_RTC_CLASS=y
342CONFIG_RTC_DRV_CMOS=y
343CONFIG_UIO=m
344CONFIG_UIO_CIF=m
345CONFIG_EXT2_FS=y
346CONFIG_EXT3_FS=y
347CONFIG_REISERFS_FS=m
348CONFIG_REISERFS_PROC_INFO=y
349CONFIG_REISERFS_FS_XATTR=y
350CONFIG_REISERFS_FS_POSIX_ACL=y
351CONFIG_REISERFS_FS_SECURITY=y
352CONFIG_JFS_FS=m
353CONFIG_JFS_POSIX_ACL=y
354CONFIG_JFS_SECURITY=y
355CONFIG_XFS_FS=m
356CONFIG_XFS_QUOTA=y
357CONFIG_XFS_POSIX_ACL=y
358CONFIG_QUOTA=y
359CONFIG_QFMT_V2=y
360CONFIG_FUSE_FS=m
361CONFIG_ISO9660_FS=m
362CONFIG_JOLIET=y
363CONFIG_ZISOFS=y
364CONFIG_UDF_FS=m
365CONFIG_MSDOS_FS=m
366CONFIG_VFAT_FS=m
367CONFIG_PROC_KCORE=y
368CONFIG_TMPFS=y
369CONFIG_CONFIGFS_FS=y
370CONFIG_AFFS_FS=m
371CONFIG_HFS_FS=m
372CONFIG_HFSPLUS_FS=m
373CONFIG_BEFS_FS=m
374CONFIG_BFS_FS=m
375CONFIG_EFS_FS=m
376CONFIG_JFFS2_FS=m
377CONFIG_JFFS2_FS_XATTR=y
378CONFIG_JFFS2_COMPRESSION_OPTIONS=y
379CONFIG_JFFS2_RUBIN=y
380CONFIG_CRAMFS=m
381CONFIG_VXFS_FS=m
382CONFIG_MINIX_FS=m
383CONFIG_ROMFS_FS=m
384CONFIG_SYSV_FS=m
385CONFIG_UFS_FS=m
386CONFIG_NFS_FS=y
387CONFIG_ROOT_NFS=y
388CONFIG_NFSD=y
389CONFIG_NFSD_V3=y
390CONFIG_NLS_CODEPAGE_437=m
391CONFIG_NLS_CODEPAGE_737=m
392CONFIG_NLS_CODEPAGE_775=m
393CONFIG_NLS_CODEPAGE_850=m
394CONFIG_NLS_CODEPAGE_852=m
395CONFIG_NLS_CODEPAGE_855=m
396CONFIG_NLS_CODEPAGE_857=m
397CONFIG_NLS_CODEPAGE_860=m
398CONFIG_NLS_CODEPAGE_861=m
399CONFIG_NLS_CODEPAGE_862=m
400CONFIG_NLS_CODEPAGE_863=m
401CONFIG_NLS_CODEPAGE_864=m
402CONFIG_NLS_CODEPAGE_865=m
403CONFIG_NLS_CODEPAGE_866=m
404CONFIG_NLS_CODEPAGE_869=m
405CONFIG_NLS_CODEPAGE_936=m
406CONFIG_NLS_CODEPAGE_950=m
407CONFIG_NLS_CODEPAGE_932=m
408CONFIG_NLS_CODEPAGE_949=m
409CONFIG_NLS_CODEPAGE_874=m
410CONFIG_NLS_ISO8859_8=m
411CONFIG_NLS_CODEPAGE_1250=m
412CONFIG_NLS_CODEPAGE_1251=m
413CONFIG_NLS_ASCII=m
414CONFIG_NLS_ISO8859_1=m
415CONFIG_NLS_ISO8859_2=m
416CONFIG_NLS_ISO8859_3=m
417CONFIG_NLS_ISO8859_4=m
418CONFIG_NLS_ISO8859_5=m
419CONFIG_NLS_ISO8859_6=m
420CONFIG_NLS_ISO8859_7=m
421CONFIG_NLS_ISO8859_9=m
422CONFIG_NLS_ISO8859_13=m
423CONFIG_NLS_ISO8859_14=m
424CONFIG_NLS_ISO8859_15=m
425CONFIG_NLS_KOI8_R=m
426CONFIG_NLS_KOI8_U=m
427CONFIG_RCU_CPU_STALL_TIMEOUT=60
428CONFIG_ENABLE_DEFAULT_TRACERS=y
429CONFIG_CRYPTO_NULL=m
430CONFIG_CRYPTO_CRYPTD=m
431CONFIG_CRYPTO_LRW=m
432CONFIG_CRYPTO_PCBC=m
433CONFIG_CRYPTO_HMAC=y
434CONFIG_CRYPTO_XCBC=m
435CONFIG_CRYPTO_MD4=m
436CONFIG_CRYPTO_SHA256=m
437CONFIG_CRYPTO_SHA512=m
438CONFIG_CRYPTO_TGR192=m
439CONFIG_CRYPTO_WP512=m
440CONFIG_CRYPTO_ANUBIS=m
441CONFIG_CRYPTO_BLOWFISH=m
442CONFIG_CRYPTO_CAMELLIA=m
443CONFIG_CRYPTO_CAST5=m
444CONFIG_CRYPTO_CAST6=m
445CONFIG_CRYPTO_FCRYPT=m
446CONFIG_CRYPTO_KHAZAD=m
447CONFIG_CRYPTO_SERPENT=m
448CONFIG_CRYPTO_TEA=m
449CONFIG_CRYPTO_TWOFISH=m
450# CONFIG_CRYPTO_ANSI_CPRNG is not set
451CONFIG_CRC16=m
452CONFIG_VIRTUALIZATION=y
453CONFIG_KVM=m
454CONFIG_KVM_MIPS_DYN_TRANS=y
455CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
456CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 000000000000..2b8558b71080
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -0,0 +1,453 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_KVM_GUEST=y
5CONFIG_PAGE_SIZE_16KB=y
6CONFIG_HZ_100=y
7CONFIG_SYSVIPC=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_LOG_BUF_SHIFT=15
11CONFIG_NAMESPACES=y
12CONFIG_RELAY=y
13CONFIG_BLK_DEV_INITRD=y
14CONFIG_EXPERT=y
15# CONFIG_COMPAT_BRK is not set
16CONFIG_SLAB=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_MODVERSIONS=y
20CONFIG_MODULE_SRCVERSION_ALL=y
21CONFIG_PCI=y
22CONFIG_PACKET=y
23CONFIG_UNIX=y
24CONFIG_XFRM_USER=m
25CONFIG_NET_KEY=y
26CONFIG_NET_KEY_MIGRATE=y
27CONFIG_INET=y
28CONFIG_IP_MULTICAST=y
29CONFIG_IP_ADVANCED_ROUTER=y
30CONFIG_IP_MULTIPLE_TABLES=y
31CONFIG_IP_ROUTE_MULTIPATH=y
32CONFIG_IP_ROUTE_VERBOSE=y
33CONFIG_IP_PNP=y
34CONFIG_IP_PNP_DHCP=y
35CONFIG_IP_PNP_BOOTP=y
36CONFIG_NET_IPIP=m
37CONFIG_IP_MROUTE=y
38CONFIG_IP_PIMSM_V1=y
39CONFIG_IP_PIMSM_V2=y
40CONFIG_SYN_COOKIES=y
41CONFIG_INET_AH=m
42CONFIG_INET_ESP=m
43CONFIG_INET_IPCOMP=m
44CONFIG_INET_XFRM_MODE_TRANSPORT=m
45CONFIG_INET_XFRM_MODE_TUNNEL=m
46CONFIG_TCP_MD5SIG=y
47CONFIG_IPV6_PRIVACY=y
48CONFIG_IPV6_ROUTER_PREF=y
49CONFIG_IPV6_ROUTE_INFO=y
50CONFIG_IPV6_OPTIMISTIC_DAD=y
51CONFIG_INET6_AH=m
52CONFIG_INET6_ESP=m
53CONFIG_INET6_IPCOMP=m
54CONFIG_IPV6_TUNNEL=m
55CONFIG_IPV6_MROUTE=y
56CONFIG_IPV6_PIMSM_V2=y
57CONFIG_NETWORK_SECMARK=y
58CONFIG_NETFILTER=y
59CONFIG_NF_CONNTRACK=m
60CONFIG_NF_CONNTRACK_SECMARK=y
61CONFIG_NF_CONNTRACK_EVENTS=y
62CONFIG_NF_CT_PROTO_DCCP=m
63CONFIG_NF_CT_PROTO_UDPLITE=m
64CONFIG_NF_CONNTRACK_AMANDA=m
65CONFIG_NF_CONNTRACK_FTP=m
66CONFIG_NF_CONNTRACK_H323=m
67CONFIG_NF_CONNTRACK_IRC=m
68CONFIG_NF_CONNTRACK_PPTP=m
69CONFIG_NF_CONNTRACK_SANE=m
70CONFIG_NF_CONNTRACK_SIP=m
71CONFIG_NF_CONNTRACK_TFTP=m
72CONFIG_NF_CT_NETLINK=m
73CONFIG_NETFILTER_TPROXY=m
74CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
75CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
76CONFIG_NETFILTER_XT_TARGET_MARK=m
77CONFIG_NETFILTER_XT_TARGET_NFLOG=m
78CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
79CONFIG_NETFILTER_XT_TARGET_TPROXY=m
80CONFIG_NETFILTER_XT_TARGET_TRACE=m
81CONFIG_NETFILTER_XT_TARGET_SECMARK=m
82CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
83CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
84CONFIG_NETFILTER_XT_MATCH_COMMENT=m
85CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
86CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
87CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
88CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
89CONFIG_NETFILTER_XT_MATCH_DCCP=m
90CONFIG_NETFILTER_XT_MATCH_ESP=m
91CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
92CONFIG_NETFILTER_XT_MATCH_HELPER=m
93CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
94CONFIG_NETFILTER_XT_MATCH_LENGTH=m
95CONFIG_NETFILTER_XT_MATCH_LIMIT=m
96CONFIG_NETFILTER_XT_MATCH_MAC=m
97CONFIG_NETFILTER_XT_MATCH_MARK=m
98CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
99CONFIG_NETFILTER_XT_MATCH_OWNER=m
100CONFIG_NETFILTER_XT_MATCH_POLICY=m
101CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
102CONFIG_NETFILTER_XT_MATCH_QUOTA=m
103CONFIG_NETFILTER_XT_MATCH_RATEEST=m
104CONFIG_NETFILTER_XT_MATCH_REALM=m
105CONFIG_NETFILTER_XT_MATCH_RECENT=m
106CONFIG_NETFILTER_XT_MATCH_SOCKET=m
107CONFIG_NETFILTER_XT_MATCH_STATE=m
108CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
109CONFIG_NETFILTER_XT_MATCH_STRING=m
110CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
111CONFIG_NETFILTER_XT_MATCH_TIME=m
112CONFIG_NETFILTER_XT_MATCH_U32=m
113CONFIG_IP_VS=m
114CONFIG_IP_VS_IPV6=y
115CONFIG_IP_VS_PROTO_TCP=y
116CONFIG_IP_VS_PROTO_UDP=y
117CONFIG_IP_VS_PROTO_ESP=y
118CONFIG_IP_VS_PROTO_AH=y
119CONFIG_IP_VS_RR=m
120CONFIG_IP_VS_WRR=m
121CONFIG_IP_VS_LC=m
122CONFIG_IP_VS_WLC=m
123CONFIG_IP_VS_LBLC=m
124CONFIG_IP_VS_LBLCR=m
125CONFIG_IP_VS_DH=m
126CONFIG_IP_VS_SH=m
127CONFIG_IP_VS_SED=m
128CONFIG_IP_VS_NQ=m
129CONFIG_NF_CONNTRACK_IPV4=m
130CONFIG_IP_NF_QUEUE=m
131CONFIG_IP_NF_IPTABLES=m
132CONFIG_IP_NF_MATCH_AH=m
133CONFIG_IP_NF_MATCH_ECN=m
134CONFIG_IP_NF_MATCH_TTL=m
135CONFIG_IP_NF_FILTER=m
136CONFIG_IP_NF_TARGET_REJECT=m
137CONFIG_IP_NF_TARGET_ULOG=m
138CONFIG_IP_NF_MANGLE=m
139CONFIG_IP_NF_TARGET_CLUSTERIP=m
140CONFIG_IP_NF_TARGET_ECN=m
141CONFIG_IP_NF_TARGET_TTL=m
142CONFIG_IP_NF_RAW=m
143CONFIG_IP_NF_ARPTABLES=m
144CONFIG_IP_NF_ARPFILTER=m
145CONFIG_IP_NF_ARP_MANGLE=m
146CONFIG_NF_CONNTRACK_IPV6=m
147CONFIG_IP6_NF_MATCH_AH=m
148CONFIG_IP6_NF_MATCH_EUI64=m
149CONFIG_IP6_NF_MATCH_FRAG=m
150CONFIG_IP6_NF_MATCH_OPTS=m
151CONFIG_IP6_NF_MATCH_HL=m
152CONFIG_IP6_NF_MATCH_IPV6HEADER=m
153CONFIG_IP6_NF_MATCH_MH=m
154CONFIG_IP6_NF_MATCH_RT=m
155CONFIG_IP6_NF_TARGET_HL=m
156CONFIG_IP6_NF_FILTER=m
157CONFIG_IP6_NF_TARGET_REJECT=m
158CONFIG_IP6_NF_MANGLE=m
159CONFIG_IP6_NF_RAW=m
160CONFIG_BRIDGE_NF_EBTABLES=m
161CONFIG_BRIDGE_EBT_BROUTE=m
162CONFIG_BRIDGE_EBT_T_FILTER=m
163CONFIG_BRIDGE_EBT_T_NAT=m
164CONFIG_BRIDGE_EBT_802_3=m
165CONFIG_BRIDGE_EBT_AMONG=m
166CONFIG_BRIDGE_EBT_ARP=m
167CONFIG_BRIDGE_EBT_IP=m
168CONFIG_BRIDGE_EBT_IP6=m
169CONFIG_BRIDGE_EBT_LIMIT=m
170CONFIG_BRIDGE_EBT_MARK=m
171CONFIG_BRIDGE_EBT_PKTTYPE=m
172CONFIG_BRIDGE_EBT_STP=m
173CONFIG_BRIDGE_EBT_VLAN=m
174CONFIG_BRIDGE_EBT_ARPREPLY=m
175CONFIG_BRIDGE_EBT_DNAT=m
176CONFIG_BRIDGE_EBT_MARK_T=m
177CONFIG_BRIDGE_EBT_REDIRECT=m
178CONFIG_BRIDGE_EBT_SNAT=m
179CONFIG_BRIDGE_EBT_LOG=m
180CONFIG_BRIDGE_EBT_ULOG=m
181CONFIG_BRIDGE_EBT_NFLOG=m
182CONFIG_IP_SCTP=m
183CONFIG_BRIDGE=m
184CONFIG_VLAN_8021Q=m
185CONFIG_VLAN_8021Q_GVRP=y
186CONFIG_ATALK=m
187CONFIG_DEV_APPLETALK=m
188CONFIG_IPDDP=m
189CONFIG_IPDDP_ENCAP=y
190CONFIG_IPDDP_DECAP=y
191CONFIG_PHONET=m
192CONFIG_NET_SCHED=y
193CONFIG_NET_SCH_CBQ=m
194CONFIG_NET_SCH_HTB=m
195CONFIG_NET_SCH_HFSC=m
196CONFIG_NET_SCH_PRIO=m
197CONFIG_NET_SCH_RED=m
198CONFIG_NET_SCH_SFQ=m
199CONFIG_NET_SCH_TEQL=m
200CONFIG_NET_SCH_TBF=m
201CONFIG_NET_SCH_GRED=m
202CONFIG_NET_SCH_DSMARK=m
203CONFIG_NET_SCH_NETEM=m
204CONFIG_NET_SCH_INGRESS=m
205CONFIG_NET_CLS_BASIC=m
206CONFIG_NET_CLS_TCINDEX=m
207CONFIG_NET_CLS_ROUTE4=m
208CONFIG_NET_CLS_FW=m
209CONFIG_NET_CLS_U32=m
210CONFIG_NET_CLS_RSVP=m
211CONFIG_NET_CLS_RSVP6=m
212CONFIG_NET_CLS_FLOW=m
213CONFIG_NET_CLS_ACT=y
214CONFIG_NET_ACT_POLICE=y
215CONFIG_NET_ACT_GACT=m
216CONFIG_GACT_PROB=y
217CONFIG_NET_ACT_MIRRED=m
218CONFIG_NET_ACT_IPT=m
219CONFIG_NET_ACT_NAT=m
220CONFIG_NET_ACT_PEDIT=m
221CONFIG_NET_ACT_SIMP=m
222CONFIG_NET_ACT_SKBEDIT=m
223CONFIG_NET_CLS_IND=y
224CONFIG_CFG80211=m
225CONFIG_MAC80211=m
226CONFIG_MAC80211_RC_PID=y
227CONFIG_MAC80211_RC_DEFAULT_PID=y
228CONFIG_MAC80211_MESH=y
229CONFIG_RFKILL=m
230CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
231CONFIG_CONNECTOR=m
232CONFIG_MTD=y
233CONFIG_MTD_CHAR=y
234CONFIG_MTD_BLOCK=y
235CONFIG_MTD_OOPS=m
236CONFIG_MTD_CFI=y
237CONFIG_MTD_CFI_INTELEXT=y
238CONFIG_MTD_CFI_AMDSTD=y
239CONFIG_MTD_CFI_STAA=y
240CONFIG_MTD_PHYSMAP=y
241CONFIG_MTD_UBI=m
242CONFIG_MTD_UBI_GLUEBI=m
243CONFIG_BLK_DEV_FD=m
244CONFIG_BLK_DEV_UMEM=m
245CONFIG_BLK_DEV_LOOP=m
246CONFIG_BLK_DEV_CRYPTOLOOP=m
247CONFIG_BLK_DEV_NBD=m
248CONFIG_BLK_DEV_RAM=y
249CONFIG_CDROM_PKTCDVD=m
250CONFIG_ATA_OVER_ETH=m
251CONFIG_VIRTIO_BLK=y
252CONFIG_IDE=y
253CONFIG_BLK_DEV_IDECD=y
254CONFIG_IDE_GENERIC=y
255CONFIG_BLK_DEV_GENERIC=y
256CONFIG_BLK_DEV_PIIX=y
257CONFIG_BLK_DEV_IT8213=m
258CONFIG_BLK_DEV_TC86C001=m
259CONFIG_RAID_ATTRS=m
260CONFIG_SCSI=m
261CONFIG_SCSI_TGT=m
262CONFIG_BLK_DEV_SD=m
263CONFIG_CHR_DEV_ST=m
264CONFIG_CHR_DEV_OSST=m
265CONFIG_BLK_DEV_SR=m
266CONFIG_BLK_DEV_SR_VENDOR=y
267CONFIG_CHR_DEV_SG=m
268CONFIG_SCSI_MULTI_LUN=y
269CONFIG_SCSI_CONSTANTS=y
270CONFIG_SCSI_LOGGING=y
271CONFIG_SCSI_SCAN_ASYNC=y
272CONFIG_SCSI_FC_ATTRS=m
273CONFIG_ISCSI_TCP=m
274CONFIG_BLK_DEV_3W_XXXX_RAID=m
275CONFIG_SCSI_3W_9XXX=m
276CONFIG_SCSI_ACARD=m
277CONFIG_SCSI_AACRAID=m
278CONFIG_SCSI_AIC7XXX=m
279CONFIG_AIC7XXX_RESET_DELAY_MS=15000
280# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
281CONFIG_MD=y
282CONFIG_BLK_DEV_MD=m
283CONFIG_MD_LINEAR=m
284CONFIG_MD_RAID0=m
285CONFIG_MD_RAID1=m
286CONFIG_MD_RAID10=m
287CONFIG_MD_RAID456=m
288CONFIG_MD_MULTIPATH=m
289CONFIG_MD_FAULTY=m
290CONFIG_BLK_DEV_DM=m
291CONFIG_DM_CRYPT=m
292CONFIG_DM_SNAPSHOT=m
293CONFIG_DM_MIRROR=m
294CONFIG_DM_ZERO=m
295CONFIG_DM_MULTIPATH=m
296CONFIG_NETDEVICES=y
297CONFIG_BONDING=m
298CONFIG_DUMMY=m
299CONFIG_EQUALIZER=m
300CONFIG_IFB=m
301CONFIG_MACVLAN=m
302CONFIG_TUN=m
303CONFIG_VETH=m
304CONFIG_VIRTIO_NET=y
305CONFIG_PCNET32=y
306CONFIG_CHELSIO_T3=m
307CONFIG_AX88796=m
308CONFIG_NETXEN_NIC=m
309CONFIG_TC35815=m
310CONFIG_MARVELL_PHY=m
311CONFIG_DAVICOM_PHY=m
312CONFIG_QSEMI_PHY=m
313CONFIG_LXT_PHY=m
314CONFIG_CICADA_PHY=m
315CONFIG_VITESSE_PHY=m
316CONFIG_SMSC_PHY=m
317CONFIG_BROADCOM_PHY=m
318CONFIG_ICPLUS_PHY=m
319CONFIG_REALTEK_PHY=m
320CONFIG_ATMEL=m
321CONFIG_PCI_ATMEL=m
322CONFIG_PRISM54=m
323CONFIG_HOSTAP=m
324CONFIG_HOSTAP_FIRMWARE=y
325CONFIG_HOSTAP_FIRMWARE_NVRAM=y
326CONFIG_HOSTAP_PLX=m
327CONFIG_HOSTAP_PCI=m
328CONFIG_IPW2100=m
329CONFIG_IPW2100_MONITOR=y
330CONFIG_LIBERTAS=m
331# CONFIG_INPUT_KEYBOARD is not set
332# CONFIG_INPUT_MOUSE is not set
333# CONFIG_SERIO_I8042 is not set
334CONFIG_VT_HW_CONSOLE_BINDING=y
335CONFIG_SERIAL_8250=y
336CONFIG_SERIAL_8250_CONSOLE=y
337# CONFIG_HWMON is not set
338CONFIG_FB=y
339CONFIG_FB_CIRRUS=y
340# CONFIG_VGA_CONSOLE is not set
341CONFIG_FRAMEBUFFER_CONSOLE=y
342CONFIG_HID=m
343CONFIG_RTC_CLASS=y
344CONFIG_RTC_DRV_CMOS=y
345CONFIG_UIO=m
346CONFIG_UIO_CIF=m
347CONFIG_VIRTIO_PCI=y
348CONFIG_VIRTIO_BALLOON=y
349CONFIG_VIRTIO_MMIO=y
350CONFIG_EXT2_FS=y
351CONFIG_EXT3_FS=y
352CONFIG_REISERFS_FS=m
353CONFIG_REISERFS_PROC_INFO=y
354CONFIG_REISERFS_FS_XATTR=y
355CONFIG_REISERFS_FS_POSIX_ACL=y
356CONFIG_REISERFS_FS_SECURITY=y
357CONFIG_JFS_FS=m
358CONFIG_JFS_POSIX_ACL=y
359CONFIG_JFS_SECURITY=y
360CONFIG_XFS_FS=m
361CONFIG_XFS_QUOTA=y
362CONFIG_XFS_POSIX_ACL=y
363CONFIG_QUOTA=y
364CONFIG_QFMT_V2=y
365CONFIG_FUSE_FS=m
366CONFIG_ISO9660_FS=m
367CONFIG_JOLIET=y
368CONFIG_ZISOFS=y
369CONFIG_UDF_FS=m
370CONFIG_MSDOS_FS=m
371CONFIG_VFAT_FS=m
372CONFIG_PROC_KCORE=y
373CONFIG_TMPFS=y
374CONFIG_AFFS_FS=m
375CONFIG_HFS_FS=m
376CONFIG_HFSPLUS_FS=m
377CONFIG_BEFS_FS=m
378CONFIG_BFS_FS=m
379CONFIG_EFS_FS=m
380CONFIG_JFFS2_FS=m
381CONFIG_JFFS2_FS_XATTR=y
382CONFIG_JFFS2_COMPRESSION_OPTIONS=y
383CONFIG_JFFS2_RUBIN=y
384CONFIG_CRAMFS=m
385CONFIG_VXFS_FS=m
386CONFIG_MINIX_FS=m
387CONFIG_ROMFS_FS=m
388CONFIG_SYSV_FS=m
389CONFIG_UFS_FS=m
390CONFIG_NFS_FS=y
391CONFIG_ROOT_NFS=y
392CONFIG_NFSD=y
393CONFIG_NFSD_V3=y
394CONFIG_NLS_CODEPAGE_437=m
395CONFIG_NLS_CODEPAGE_737=m
396CONFIG_NLS_CODEPAGE_775=m
397CONFIG_NLS_CODEPAGE_850=m
398CONFIG_NLS_CODEPAGE_852=m
399CONFIG_NLS_CODEPAGE_855=m
400CONFIG_NLS_CODEPAGE_857=m
401CONFIG_NLS_CODEPAGE_860=m
402CONFIG_NLS_CODEPAGE_861=m
403CONFIG_NLS_CODEPAGE_862=m
404CONFIG_NLS_CODEPAGE_863=m
405CONFIG_NLS_CODEPAGE_864=m
406CONFIG_NLS_CODEPAGE_865=m
407CONFIG_NLS_CODEPAGE_866=m
408CONFIG_NLS_CODEPAGE_869=m
409CONFIG_NLS_CODEPAGE_936=m
410CONFIG_NLS_CODEPAGE_950=m
411CONFIG_NLS_CODEPAGE_932=m
412CONFIG_NLS_CODEPAGE_949=m
413CONFIG_NLS_CODEPAGE_874=m
414CONFIG_NLS_ISO8859_8=m
415CONFIG_NLS_CODEPAGE_1250=m
416CONFIG_NLS_CODEPAGE_1251=m
417CONFIG_NLS_ASCII=m
418CONFIG_NLS_ISO8859_1=m
419CONFIG_NLS_ISO8859_2=m
420CONFIG_NLS_ISO8859_3=m
421CONFIG_NLS_ISO8859_4=m
422CONFIG_NLS_ISO8859_5=m
423CONFIG_NLS_ISO8859_6=m
424CONFIG_NLS_ISO8859_7=m
425CONFIG_NLS_ISO8859_9=m
426CONFIG_NLS_ISO8859_13=m
427CONFIG_NLS_ISO8859_14=m
428CONFIG_NLS_ISO8859_15=m
429CONFIG_NLS_KOI8_R=m
430CONFIG_NLS_KOI8_U=m
431CONFIG_CRYPTO_NULL=m
432CONFIG_CRYPTO_CRYPTD=m
433CONFIG_CRYPTO_LRW=m
434CONFIG_CRYPTO_PCBC=m
435CONFIG_CRYPTO_HMAC=y
436CONFIG_CRYPTO_XCBC=m
437CONFIG_CRYPTO_MD4=m
438CONFIG_CRYPTO_SHA256=m
439CONFIG_CRYPTO_SHA512=m
440CONFIG_CRYPTO_TGR192=m
441CONFIG_CRYPTO_WP512=m
442CONFIG_CRYPTO_ANUBIS=m
443CONFIG_CRYPTO_BLOWFISH=m
444CONFIG_CRYPTO_CAMELLIA=m
445CONFIG_CRYPTO_CAST5=m
446CONFIG_CRYPTO_CAST6=m
447CONFIG_CRYPTO_FCRYPT=m
448CONFIG_CRYPTO_KHAZAD=m
449CONFIG_CRYPTO_SERPENT=m
450CONFIG_CRYPTO_TEA=m
451CONFIG_CRYPTO_TWOFISH=m
452# CONFIG_CRYPTO_ANSI_CPRNG is not set
453CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644
index 000000000000..93057a760dfa
--- /dev/null
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -0,0 +1,195 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_MIPS_VPE_LOADER=y
5CONFIG_MIPS_VPE_APSP_API=y
6CONFIG_HZ_100=y
7CONFIG_LOCALVERSION="aprp"
8CONFIG_SYSVIPC=y
9CONFIG_POSIX_MQUEUE=y
10CONFIG_AUDIT=y
11CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y
13CONFIG_LOG_BUF_SHIFT=15
14CONFIG_SYSCTL_SYSCALL=y
15CONFIG_EMBEDDED=y
16CONFIG_SLAB=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_MODVERSIONS=y
20CONFIG_MODULE_SRCVERSION_ALL=y
21# CONFIG_BLK_DEV_BSG is not set
22CONFIG_PCI=y
23# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
24CONFIG_NET=y
25CONFIG_PACKET=y
26CONFIG_UNIX=y
27CONFIG_XFRM_USER=m
28CONFIG_NET_KEY=y
29CONFIG_INET=y
30CONFIG_IP_MULTICAST=y
31CONFIG_IP_ADVANCED_ROUTER=y
32CONFIG_IP_MULTIPLE_TABLES=y
33CONFIG_IP_ROUTE_MULTIPATH=y
34CONFIG_IP_ROUTE_VERBOSE=y
35CONFIG_IP_PNP=y
36CONFIG_IP_PNP_DHCP=y
37CONFIG_IP_PNP_BOOTP=y
38CONFIG_NET_IPIP=m
39CONFIG_IP_MROUTE=y
40CONFIG_IP_PIMSM_V1=y
41CONFIG_IP_PIMSM_V2=y
42CONFIG_SYN_COOKIES=y
43CONFIG_INET_AH=m
44CONFIG_INET_ESP=m
45CONFIG_INET_IPCOMP=m
46# CONFIG_INET_LRO is not set
47CONFIG_IPV6_PRIVACY=y
48CONFIG_INET6_AH=m
49CONFIG_INET6_ESP=m
50CONFIG_INET6_IPCOMP=m
51CONFIG_IPV6_TUNNEL=m
52CONFIG_BRIDGE=m
53CONFIG_VLAN_8021Q=m
54CONFIG_ATALK=m
55CONFIG_DEV_APPLETALK=m
56CONFIG_IPDDP=m
57CONFIG_IPDDP_ENCAP=y
58CONFIG_IPDDP_DECAP=y
59CONFIG_NET_SCHED=y
60CONFIG_NET_SCH_CBQ=m
61CONFIG_NET_SCH_HTB=m
62CONFIG_NET_SCH_HFSC=m
63CONFIG_NET_SCH_PRIO=m
64CONFIG_NET_SCH_RED=m
65CONFIG_NET_SCH_SFQ=m
66CONFIG_NET_SCH_TEQL=m
67CONFIG_NET_SCH_TBF=m
68CONFIG_NET_SCH_GRED=m
69CONFIG_NET_SCH_DSMARK=m
70CONFIG_NET_SCH_NETEM=m
71CONFIG_NET_SCH_INGRESS=m
72CONFIG_NET_CLS_BASIC=m
73CONFIG_NET_CLS_TCINDEX=m
74CONFIG_NET_CLS_ROUTE4=m
75CONFIG_NET_CLS_FW=m
76CONFIG_NET_CLS_U32=m
77CONFIG_NET_CLS_RSVP=m
78CONFIG_NET_CLS_RSVP6=m
79CONFIG_NET_CLS_ACT=y
80CONFIG_NET_ACT_POLICE=y
81CONFIG_NET_CLS_IND=y
82# CONFIG_WIRELESS is not set
83CONFIG_BLK_DEV_LOOP=y
84CONFIG_BLK_DEV_CRYPTOLOOP=m
85CONFIG_IDE=y
86# CONFIG_IDE_PROC_FS is not set
87# CONFIG_IDEPCI_PCIBUS_ORDER is not set
88CONFIG_BLK_DEV_GENERIC=y
89CONFIG_BLK_DEV_PIIX=y
90CONFIG_SCSI=y
91CONFIG_BLK_DEV_SD=y
92CONFIG_CHR_DEV_SG=y
93# CONFIG_SCSI_LOWLEVEL is not set
94CONFIG_NETDEVICES=y
95# CONFIG_NET_VENDOR_3COM is not set
96# CONFIG_NET_VENDOR_ADAPTEC is not set
97# CONFIG_NET_VENDOR_ALTEON is not set
98CONFIG_PCNET32=y
99# CONFIG_NET_VENDOR_ATHEROS is not set
100# CONFIG_NET_VENDOR_BROADCOM is not set
101# CONFIG_NET_VENDOR_BROCADE is not set
102# CONFIG_NET_VENDOR_CHELSIO is not set
103# CONFIG_NET_VENDOR_CISCO is not set
104# CONFIG_NET_VENDOR_DEC is not set
105# CONFIG_NET_VENDOR_DLINK is not set
106# CONFIG_NET_VENDOR_EMULEX is not set
107# CONFIG_NET_VENDOR_EXAR is not set
108# CONFIG_NET_VENDOR_HP is not set
109# CONFIG_NET_VENDOR_INTEL is not set
110# CONFIG_NET_VENDOR_MARVELL is not set
111# CONFIG_NET_VENDOR_MELLANOX is not set
112# CONFIG_NET_VENDOR_MICREL is not set
113# CONFIG_NET_VENDOR_MYRI is not set
114# CONFIG_NET_VENDOR_NATSEMI is not set
115# CONFIG_NET_VENDOR_NVIDIA is not set
116# CONFIG_NET_VENDOR_OKI is not set
117# CONFIG_NET_PACKET_ENGINE is not set
118# CONFIG_NET_VENDOR_QLOGIC is not set
119# CONFIG_NET_VENDOR_REALTEK is not set
120# CONFIG_NET_VENDOR_RDC is not set
121# CONFIG_NET_VENDOR_SEEQ is not set
122# CONFIG_NET_VENDOR_SILAN is not set
123# CONFIG_NET_VENDOR_SIS is not set
124# CONFIG_NET_VENDOR_SMSC is not set
125# CONFIG_NET_VENDOR_STMICRO is not set
126# CONFIG_NET_VENDOR_SUN is not set
127# CONFIG_NET_VENDOR_TEHUTI is not set
128# CONFIG_NET_VENDOR_TI is not set
129# CONFIG_NET_VENDOR_TOSHIBA is not set
130# CONFIG_NET_VENDOR_VIA is not set
131# CONFIG_WLAN is not set
132# CONFIG_VT is not set
133CONFIG_LEGACY_PTY_COUNT=16
134CONFIG_SERIAL_8250=y
135CONFIG_SERIAL_8250_CONSOLE=y
136CONFIG_HW_RANDOM=y
137# CONFIG_HWMON is not set
138CONFIG_VIDEO_OUTPUT_CONTROL=m
139CONFIG_FB=y
140CONFIG_FIRMWARE_EDID=y
141CONFIG_FB_MATROX=y
142CONFIG_FB_MATROX_G=y
143CONFIG_USB=y
144CONFIG_USB_EHCI_HCD=y
145# CONFIG_USB_EHCI_TT_NEWSCHED is not set
146CONFIG_USB_UHCI_HCD=y
147CONFIG_USB_STORAGE=y
148CONFIG_NEW_LEDS=y
149CONFIG_LEDS_CLASS=y
150CONFIG_LEDS_TRIGGERS=y
151CONFIG_LEDS_TRIGGER_TIMER=y
152CONFIG_LEDS_TRIGGER_IDE_DISK=y
153CONFIG_LEDS_TRIGGER_HEARTBEAT=y
154CONFIG_LEDS_TRIGGER_BACKLIGHT=y
155CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
156CONFIG_RTC_CLASS=y
157CONFIG_RTC_DRV_CMOS=y
158CONFIG_EXT2_FS=y
159CONFIG_EXT3_FS=y
160# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
161CONFIG_XFS_FS=y
162CONFIG_XFS_QUOTA=y
163CONFIG_XFS_POSIX_ACL=y
164CONFIG_QUOTA=y
165CONFIG_QFMT_V2=y
166CONFIG_MSDOS_FS=m
167CONFIG_VFAT_FS=m
168CONFIG_PROC_KCORE=y
169CONFIG_TMPFS=y
170CONFIG_NFS_FS=y
171CONFIG_ROOT_NFS=y
172CONFIG_CIFS=m
173CONFIG_CIFS_WEAK_PW_HASH=y
174CONFIG_CIFS_XATTR=y
175CONFIG_CIFS_POSIX=y
176CONFIG_NLS_CODEPAGE_437=m
177CONFIG_NLS_ISO8859_1=m
178# CONFIG_FTRACE is not set
179CONFIG_CRYPTO_NULL=m
180CONFIG_CRYPTO_PCBC=m
181CONFIG_CRYPTO_HMAC=y
182CONFIG_CRYPTO_MICHAEL_MIC=m
183CONFIG_CRYPTO_SHA512=m
184CONFIG_CRYPTO_TGR192=m
185CONFIG_CRYPTO_WP512=m
186CONFIG_CRYPTO_ANUBIS=m
187CONFIG_CRYPTO_BLOWFISH=m
188CONFIG_CRYPTO_CAST5=m
189CONFIG_CRYPTO_CAST6=m
190CONFIG_CRYPTO_KHAZAD=m
191CONFIG_CRYPTO_SERPENT=m
192CONFIG_CRYPTO_TEA=m
193CONFIG_CRYPTO_TWOFISH=m
194# CONFIG_CRYPTO_ANSI_CPRNG is not set
195# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
new file mode 100644
index 000000000000..4e54b75d89be
--- /dev/null
+++ b/arch/mips/configs/maltasmtc_defconfig
@@ -0,0 +1,196 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_MIPS_MT_SMTC=y
5# CONFIG_MIPS_MT_FPAFF is not set
6CONFIG_NR_CPUS=9
7CONFIG_HZ_48=y
8CONFIG_LOCALVERSION="smtc"
9CONFIG_SYSVIPC=y
10CONFIG_POSIX_MQUEUE=y
11CONFIG_AUDIT=y
12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y
14CONFIG_LOG_BUF_SHIFT=15
15CONFIG_SYSCTL_SYSCALL=y
16CONFIG_EMBEDDED=y
17CONFIG_SLAB=y
18CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y
20CONFIG_MODVERSIONS=y
21CONFIG_MODULE_SRCVERSION_ALL=y
22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_PCI=y
24# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
25CONFIG_NET=y
26CONFIG_PACKET=y
27CONFIG_UNIX=y
28CONFIG_XFRM_USER=m
29CONFIG_NET_KEY=y
30CONFIG_INET=y
31CONFIG_IP_MULTICAST=y
32CONFIG_IP_ADVANCED_ROUTER=y
33CONFIG_IP_MULTIPLE_TABLES=y
34CONFIG_IP_ROUTE_MULTIPATH=y
35CONFIG_IP_ROUTE_VERBOSE=y
36CONFIG_IP_PNP=y
37CONFIG_IP_PNP_DHCP=y
38CONFIG_IP_PNP_BOOTP=y
39CONFIG_NET_IPIP=m
40CONFIG_IP_MROUTE=y
41CONFIG_IP_PIMSM_V1=y
42CONFIG_IP_PIMSM_V2=y
43CONFIG_SYN_COOKIES=y
44CONFIG_INET_AH=m
45CONFIG_INET_ESP=m
46CONFIG_INET_IPCOMP=m
47# CONFIG_INET_LRO is not set
48CONFIG_IPV6_PRIVACY=y
49CONFIG_INET6_AH=m
50CONFIG_INET6_ESP=m
51CONFIG_INET6_IPCOMP=m
52CONFIG_IPV6_TUNNEL=m
53CONFIG_BRIDGE=m
54CONFIG_VLAN_8021Q=m
55CONFIG_ATALK=m
56CONFIG_DEV_APPLETALK=m
57CONFIG_IPDDP=m
58CONFIG_IPDDP_ENCAP=y
59CONFIG_IPDDP_DECAP=y
60CONFIG_NET_SCHED=y
61CONFIG_NET_SCH_CBQ=m
62CONFIG_NET_SCH_HTB=m
63CONFIG_NET_SCH_HFSC=m
64CONFIG_NET_SCH_PRIO=m
65CONFIG_NET_SCH_RED=m
66CONFIG_NET_SCH_SFQ=m
67CONFIG_NET_SCH_TEQL=m
68CONFIG_NET_SCH_TBF=m
69CONFIG_NET_SCH_GRED=m
70CONFIG_NET_SCH_DSMARK=m
71CONFIG_NET_SCH_NETEM=m
72CONFIG_NET_SCH_INGRESS=m
73CONFIG_NET_CLS_BASIC=m
74CONFIG_NET_CLS_TCINDEX=m
75CONFIG_NET_CLS_ROUTE4=m
76CONFIG_NET_CLS_FW=m
77CONFIG_NET_CLS_U32=m
78CONFIG_NET_CLS_RSVP=m
79CONFIG_NET_CLS_RSVP6=m
80CONFIG_NET_CLS_ACT=y
81CONFIG_NET_ACT_POLICE=y
82CONFIG_NET_CLS_IND=y
83# CONFIG_WIRELESS is not set
84CONFIG_BLK_DEV_LOOP=y
85CONFIG_BLK_DEV_CRYPTOLOOP=m
86CONFIG_IDE=y
87# CONFIG_IDE_PROC_FS is not set
88# CONFIG_IDEPCI_PCIBUS_ORDER is not set
89CONFIG_BLK_DEV_GENERIC=y
90CONFIG_BLK_DEV_PIIX=y
91CONFIG_SCSI=y
92CONFIG_BLK_DEV_SD=y
93CONFIG_CHR_DEV_SG=y
94# CONFIG_SCSI_LOWLEVEL is not set
95CONFIG_NETDEVICES=y
96# CONFIG_NET_VENDOR_3COM is not set
97# CONFIG_NET_VENDOR_ADAPTEC is not set
98# CONFIG_NET_VENDOR_ALTEON is not set
99CONFIG_PCNET32=y
100# CONFIG_NET_VENDOR_ATHEROS is not set
101# CONFIG_NET_VENDOR_BROADCOM is not set
102# CONFIG_NET_VENDOR_BROCADE is not set
103# CONFIG_NET_VENDOR_CHELSIO is not set
104# CONFIG_NET_VENDOR_CISCO is not set
105# CONFIG_NET_VENDOR_DEC is not set
106# CONFIG_NET_VENDOR_DLINK is not set
107# CONFIG_NET_VENDOR_EMULEX is not set
108# CONFIG_NET_VENDOR_EXAR is not set
109# CONFIG_NET_VENDOR_HP is not set
110# CONFIG_NET_VENDOR_INTEL is not set
111# CONFIG_NET_VENDOR_MARVELL is not set
112# CONFIG_NET_VENDOR_MELLANOX is not set
113# CONFIG_NET_VENDOR_MICREL is not set
114# CONFIG_NET_VENDOR_MYRI is not set
115# CONFIG_NET_VENDOR_NATSEMI is not set
116# CONFIG_NET_VENDOR_NVIDIA is not set
117# CONFIG_NET_VENDOR_OKI is not set
118# CONFIG_NET_PACKET_ENGINE is not set
119# CONFIG_NET_VENDOR_QLOGIC is not set
120# CONFIG_NET_VENDOR_REALTEK is not set
121# CONFIG_NET_VENDOR_RDC is not set
122# CONFIG_NET_VENDOR_SEEQ is not set
123# CONFIG_NET_VENDOR_SILAN is not set
124# CONFIG_NET_VENDOR_SIS is not set
125# CONFIG_NET_VENDOR_SMSC is not set
126# CONFIG_NET_VENDOR_STMICRO is not set
127# CONFIG_NET_VENDOR_SUN is not set
128# CONFIG_NET_VENDOR_TEHUTI is not set
129# CONFIG_NET_VENDOR_TI is not set
130# CONFIG_NET_VENDOR_TOSHIBA is not set
131# CONFIG_NET_VENDOR_VIA is not set
132# CONFIG_WLAN is not set
133# CONFIG_VT is not set
134CONFIG_LEGACY_PTY_COUNT=16
135CONFIG_SERIAL_8250=y
136CONFIG_SERIAL_8250_CONSOLE=y
137CONFIG_HW_RANDOM=y
138# CONFIG_HWMON is not set
139CONFIG_VIDEO_OUTPUT_CONTROL=m
140CONFIG_FB=y
141CONFIG_FIRMWARE_EDID=y
142CONFIG_FB_MATROX=y
143CONFIG_FB_MATROX_G=y
144CONFIG_USB=y
145CONFIG_USB_EHCI_HCD=y
146# CONFIG_USB_EHCI_TT_NEWSCHED is not set
147CONFIG_USB_UHCI_HCD=y
148CONFIG_USB_STORAGE=y
149CONFIG_NEW_LEDS=y
150CONFIG_LEDS_CLASS=y
151CONFIG_LEDS_TRIGGERS=y
152CONFIG_LEDS_TRIGGER_TIMER=y
153CONFIG_LEDS_TRIGGER_IDE_DISK=y
154CONFIG_LEDS_TRIGGER_HEARTBEAT=y
155CONFIG_LEDS_TRIGGER_BACKLIGHT=y
156CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
157CONFIG_RTC_CLASS=y
158CONFIG_RTC_DRV_CMOS=y
159CONFIG_EXT2_FS=y
160CONFIG_EXT3_FS=y
161# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
162CONFIG_XFS_FS=y
163CONFIG_XFS_QUOTA=y
164CONFIG_XFS_POSIX_ACL=y
165CONFIG_QUOTA=y
166CONFIG_QFMT_V2=y
167CONFIG_MSDOS_FS=m
168CONFIG_VFAT_FS=m
169CONFIG_PROC_KCORE=y
170CONFIG_TMPFS=y
171CONFIG_NFS_FS=y
172CONFIG_ROOT_NFS=y
173CONFIG_CIFS=m
174CONFIG_CIFS_WEAK_PW_HASH=y
175CONFIG_CIFS_XATTR=y
176CONFIG_CIFS_POSIX=y
177CONFIG_NLS_CODEPAGE_437=m
178CONFIG_NLS_ISO8859_1=m
179# CONFIG_FTRACE is not set
180CONFIG_CRYPTO_NULL=m
181CONFIG_CRYPTO_PCBC=m
182CONFIG_CRYPTO_HMAC=y
183CONFIG_CRYPTO_MICHAEL_MIC=m
184CONFIG_CRYPTO_SHA512=m
185CONFIG_CRYPTO_TGR192=m
186CONFIG_CRYPTO_WP512=m
187CONFIG_CRYPTO_ANUBIS=m
188CONFIG_CRYPTO_BLOWFISH=m
189CONFIG_CRYPTO_CAST5=m
190CONFIG_CRYPTO_CAST6=m
191CONFIG_CRYPTO_KHAZAD=m
192CONFIG_CRYPTO_SERPENT=m
193CONFIG_CRYPTO_TEA=m
194CONFIG_CRYPTO_TWOFISH=m
195# CONFIG_CRYPTO_ANSI_CPRNG is not set
196# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644
index 000000000000..8a666021b870
--- /dev/null
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -0,0 +1,199 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_MIPS_MT_SMP=y
5CONFIG_SCHED_SMT=y
6CONFIG_MIPS_CMP=y
7CONFIG_NR_CPUS=8
8CONFIG_HZ_100=y
9CONFIG_LOCALVERSION="cmp"
10CONFIG_SYSVIPC=y
11CONFIG_POSIX_MQUEUE=y
12CONFIG_AUDIT=y
13CONFIG_NO_HZ=y
14CONFIG_IKCONFIG=y
15CONFIG_IKCONFIG_PROC=y
16CONFIG_LOG_BUF_SHIFT=15
17CONFIG_SYSCTL_SYSCALL=y
18CONFIG_EMBEDDED=y
19CONFIG_SLAB=y
20CONFIG_MODULES=y
21CONFIG_MODULE_UNLOAD=y
22CONFIG_MODVERSIONS=y
23CONFIG_MODULE_SRCVERSION_ALL=y
24# CONFIG_BLK_DEV_BSG is not set
25CONFIG_PCI=y
26# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
27CONFIG_NET=y
28CONFIG_PACKET=y
29CONFIG_UNIX=y
30CONFIG_XFRM_USER=m
31CONFIG_NET_KEY=y
32CONFIG_INET=y
33CONFIG_IP_MULTICAST=y
34CONFIG_IP_ADVANCED_ROUTER=y
35CONFIG_IP_MULTIPLE_TABLES=y
36CONFIG_IP_ROUTE_MULTIPATH=y
37CONFIG_IP_ROUTE_VERBOSE=y
38CONFIG_IP_PNP=y
39CONFIG_IP_PNP_DHCP=y
40CONFIG_IP_PNP_BOOTP=y
41CONFIG_NET_IPIP=m
42CONFIG_IP_MROUTE=y
43CONFIG_IP_PIMSM_V1=y
44CONFIG_IP_PIMSM_V2=y
45CONFIG_SYN_COOKIES=y
46CONFIG_INET_AH=m
47CONFIG_INET_ESP=m
48CONFIG_INET_IPCOMP=m
49# CONFIG_INET_LRO is not set
50CONFIG_IPV6_PRIVACY=y
51CONFIG_INET6_AH=m
52CONFIG_INET6_ESP=m
53CONFIG_INET6_IPCOMP=m
54CONFIG_IPV6_TUNNEL=m
55CONFIG_BRIDGE=m
56CONFIG_VLAN_8021Q=m
57CONFIG_ATALK=m
58CONFIG_DEV_APPLETALK=m
59CONFIG_IPDDP=m
60CONFIG_IPDDP_ENCAP=y
61CONFIG_IPDDP_DECAP=y
62CONFIG_NET_SCHED=y
63CONFIG_NET_SCH_CBQ=m
64CONFIG_NET_SCH_HTB=m
65CONFIG_NET_SCH_HFSC=m
66CONFIG_NET_SCH_PRIO=m
67CONFIG_NET_SCH_RED=m
68CONFIG_NET_SCH_SFQ=m
69CONFIG_NET_SCH_TEQL=m
70CONFIG_NET_SCH_TBF=m
71CONFIG_NET_SCH_GRED=m
72CONFIG_NET_SCH_DSMARK=m
73CONFIG_NET_SCH_NETEM=m
74CONFIG_NET_SCH_INGRESS=m
75CONFIG_NET_CLS_BASIC=m
76CONFIG_NET_CLS_TCINDEX=m
77CONFIG_NET_CLS_ROUTE4=m
78CONFIG_NET_CLS_FW=m
79CONFIG_NET_CLS_U32=m
80CONFIG_NET_CLS_RSVP=m
81CONFIG_NET_CLS_RSVP6=m
82CONFIG_NET_CLS_ACT=y
83CONFIG_NET_ACT_POLICE=y
84CONFIG_NET_CLS_IND=y
85# CONFIG_WIRELESS is not set
86CONFIG_BLK_DEV_LOOP=y
87CONFIG_BLK_DEV_CRYPTOLOOP=m
88CONFIG_IDE=y
89# CONFIG_IDE_PROC_FS is not set
90# CONFIG_IDEPCI_PCIBUS_ORDER is not set
91CONFIG_BLK_DEV_GENERIC=y
92CONFIG_BLK_DEV_PIIX=y
93CONFIG_SCSI=y
94CONFIG_BLK_DEV_SD=y
95CONFIG_CHR_DEV_SG=y
96# CONFIG_SCSI_LOWLEVEL is not set
97CONFIG_NETDEVICES=y
98# CONFIG_NET_VENDOR_3COM is not set
99# CONFIG_NET_VENDOR_ADAPTEC is not set
100# CONFIG_NET_VENDOR_ALTEON is not set
101CONFIG_PCNET32=y
102# CONFIG_NET_VENDOR_ATHEROS is not set
103# CONFIG_NET_VENDOR_BROADCOM is not set
104# CONFIG_NET_VENDOR_BROCADE is not set
105# CONFIG_NET_VENDOR_CHELSIO is not set
106# CONFIG_NET_VENDOR_CISCO is not set
107# CONFIG_NET_VENDOR_DEC is not set
108# CONFIG_NET_VENDOR_DLINK is not set
109# CONFIG_NET_VENDOR_EMULEX is not set
110# CONFIG_NET_VENDOR_EXAR is not set
111# CONFIG_NET_VENDOR_HP is not set
112# CONFIG_NET_VENDOR_INTEL is not set
113# CONFIG_NET_VENDOR_MARVELL is not set
114# CONFIG_NET_VENDOR_MELLANOX is not set
115# CONFIG_NET_VENDOR_MICREL is not set
116# CONFIG_NET_VENDOR_MYRI is not set
117# CONFIG_NET_VENDOR_NATSEMI is not set
118# CONFIG_NET_VENDOR_NVIDIA is not set
119# CONFIG_NET_VENDOR_OKI is not set
120# CONFIG_NET_PACKET_ENGINE is not set
121# CONFIG_NET_VENDOR_QLOGIC is not set
122# CONFIG_NET_VENDOR_REALTEK is not set
123# CONFIG_NET_VENDOR_RDC is not set
124# CONFIG_NET_VENDOR_SEEQ is not set
125# CONFIG_NET_VENDOR_SILAN is not set
126# CONFIG_NET_VENDOR_SIS is not set
127# CONFIG_NET_VENDOR_SMSC is not set
128# CONFIG_NET_VENDOR_STMICRO is not set
129# CONFIG_NET_VENDOR_SUN is not set
130# CONFIG_NET_VENDOR_TEHUTI is not set
131# CONFIG_NET_VENDOR_TI is not set
132# CONFIG_NET_VENDOR_TOSHIBA is not set
133# CONFIG_NET_VENDOR_VIA is not set
134# CONFIG_NET_VENDOR_WIZNET is not set
135# CONFIG_WLAN is not set
136# CONFIG_VT is not set
137CONFIG_LEGACY_PTY_COUNT=4
138CONFIG_SERIAL_8250=y
139CONFIG_SERIAL_8250_CONSOLE=y
140CONFIG_HW_RANDOM=y
141# CONFIG_HWMON is not set
142CONFIG_VIDEO_OUTPUT_CONTROL=m
143CONFIG_FB=y
144CONFIG_FIRMWARE_EDID=y
145CONFIG_FB_MATROX=y
146CONFIG_FB_MATROX_G=y
147CONFIG_USB=y
148CONFIG_USB_EHCI_HCD=y
149# CONFIG_USB_EHCI_TT_NEWSCHED is not set
150CONFIG_USB_UHCI_HCD=y
151CONFIG_USB_STORAGE=y
152CONFIG_NEW_LEDS=y
153CONFIG_LEDS_CLASS=y
154CONFIG_LEDS_TRIGGERS=y
155CONFIG_LEDS_TRIGGER_TIMER=y
156CONFIG_LEDS_TRIGGER_IDE_DISK=y
157CONFIG_LEDS_TRIGGER_HEARTBEAT=y
158CONFIG_LEDS_TRIGGER_BACKLIGHT=y
159CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
160CONFIG_RTC_CLASS=y
161CONFIG_RTC_DRV_CMOS=y
162CONFIG_EXT2_FS=y
163CONFIG_EXT3_FS=y
164# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
165CONFIG_XFS_FS=y
166CONFIG_XFS_QUOTA=y
167CONFIG_XFS_POSIX_ACL=y
168CONFIG_QUOTA=y
169CONFIG_QFMT_V2=y
170CONFIG_MSDOS_FS=m
171CONFIG_VFAT_FS=m
172CONFIG_PROC_KCORE=y
173CONFIG_TMPFS=y
174CONFIG_NFS_FS=y
175CONFIG_ROOT_NFS=y
176CONFIG_CIFS=m
177CONFIG_CIFS_WEAK_PW_HASH=y
178CONFIG_CIFS_XATTR=y
179CONFIG_CIFS_POSIX=y
180CONFIG_NLS_CODEPAGE_437=m
181CONFIG_NLS_ISO8859_1=m
182# CONFIG_FTRACE is not set
183CONFIG_CRYPTO_NULL=m
184CONFIG_CRYPTO_PCBC=m
185CONFIG_CRYPTO_HMAC=y
186CONFIG_CRYPTO_MICHAEL_MIC=m
187CONFIG_CRYPTO_SHA512=m
188CONFIG_CRYPTO_TGR192=m
189CONFIG_CRYPTO_WP512=m
190CONFIG_CRYPTO_ANUBIS=m
191CONFIG_CRYPTO_BLOWFISH=m
192CONFIG_CRYPTO_CAST5=m
193CONFIG_CRYPTO_CAST6=m
194CONFIG_CRYPTO_KHAZAD=m
195CONFIG_CRYPTO_SERPENT=m
196CONFIG_CRYPTO_TEA=m
197CONFIG_CRYPTO_TWOFISH=m
198# CONFIG_CRYPTO_ANSI_CPRNG is not set
199# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644
index 000000000000..9868fc9c1133
--- /dev/null
+++ b/arch/mips/configs/maltaup_defconfig
@@ -0,0 +1,194 @@
1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_HZ_100=y
5CONFIG_LOCALVERSION="up"
6CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y
8CONFIG_AUDIT=y
9CONFIG_NO_HZ=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=15
13CONFIG_SYSCTL_SYSCALL=y
14CONFIG_EMBEDDED=y
15CONFIG_SLAB=y
16CONFIG_MODULES=y
17CONFIG_MODULE_UNLOAD=y
18CONFIG_MODVERSIONS=y
19CONFIG_MODULE_SRCVERSION_ALL=y
20# CONFIG_BLK_DEV_BSG is not set
21CONFIG_PCI=y
22# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
23CONFIG_NET=y
24CONFIG_PACKET=y
25CONFIG_UNIX=y
26CONFIG_XFRM_USER=m
27CONFIG_NET_KEY=y
28CONFIG_INET=y
29CONFIG_IP_MULTICAST=y
30CONFIG_IP_ADVANCED_ROUTER=y
31CONFIG_IP_MULTIPLE_TABLES=y
32CONFIG_IP_ROUTE_MULTIPATH=y
33CONFIG_IP_ROUTE_VERBOSE=y
34CONFIG_IP_PNP=y
35CONFIG_IP_PNP_DHCP=y
36CONFIG_IP_PNP_BOOTP=y
37CONFIG_NET_IPIP=m
38CONFIG_IP_MROUTE=y
39CONFIG_IP_PIMSM_V1=y
40CONFIG_IP_PIMSM_V2=y
41CONFIG_SYN_COOKIES=y
42CONFIG_INET_AH=m
43CONFIG_INET_ESP=m
44CONFIG_INET_IPCOMP=m
45# CONFIG_INET_LRO is not set
46CONFIG_IPV6_PRIVACY=y
47CONFIG_INET6_AH=m
48CONFIG_INET6_ESP=m
49CONFIG_INET6_IPCOMP=m
50CONFIG_IPV6_TUNNEL=m
51CONFIG_BRIDGE=m
52CONFIG_VLAN_8021Q=m
53CONFIG_ATALK=m
54CONFIG_DEV_APPLETALK=m
55CONFIG_IPDDP=m
56CONFIG_IPDDP_ENCAP=y
57CONFIG_IPDDP_DECAP=y
58CONFIG_NET_SCHED=y
59CONFIG_NET_SCH_CBQ=m
60CONFIG_NET_SCH_HTB=m
61CONFIG_NET_SCH_HFSC=m
62CONFIG_NET_SCH_PRIO=m
63CONFIG_NET_SCH_RED=m
64CONFIG_NET_SCH_SFQ=m
65CONFIG_NET_SCH_TEQL=m
66CONFIG_NET_SCH_TBF=m
67CONFIG_NET_SCH_GRED=m
68CONFIG_NET_SCH_DSMARK=m
69CONFIG_NET_SCH_NETEM=m
70CONFIG_NET_SCH_INGRESS=m
71CONFIG_NET_CLS_BASIC=m
72CONFIG_NET_CLS_TCINDEX=m
73CONFIG_NET_CLS_ROUTE4=m
74CONFIG_NET_CLS_FW=m
75CONFIG_NET_CLS_U32=m
76CONFIG_NET_CLS_RSVP=m
77CONFIG_NET_CLS_RSVP6=m
78CONFIG_NET_CLS_ACT=y
79CONFIG_NET_ACT_POLICE=y
80CONFIG_NET_CLS_IND=y
81# CONFIG_WIRELESS is not set
82CONFIG_BLK_DEV_LOOP=y
83CONFIG_BLK_DEV_CRYPTOLOOP=m
84CONFIG_IDE=y
85# CONFIG_IDE_PROC_FS is not set
86# CONFIG_IDEPCI_PCIBUS_ORDER is not set
87CONFIG_BLK_DEV_GENERIC=y
88CONFIG_BLK_DEV_PIIX=y
89CONFIG_SCSI=y
90CONFIG_BLK_DEV_SD=y
91CONFIG_CHR_DEV_SG=y
92# CONFIG_SCSI_LOWLEVEL is not set
93CONFIG_NETDEVICES=y
94# CONFIG_NET_VENDOR_3COM is not set
95# CONFIG_NET_VENDOR_ADAPTEC is not set
96# CONFIG_NET_VENDOR_ALTEON is not set
97CONFIG_PCNET32=y
98# CONFIG_NET_VENDOR_ATHEROS is not set
99# CONFIG_NET_VENDOR_BROADCOM is not set
100# CONFIG_NET_VENDOR_BROCADE is not set
101# CONFIG_NET_VENDOR_CHELSIO is not set
102# CONFIG_NET_VENDOR_CISCO is not set
103# CONFIG_NET_VENDOR_DEC is not set
104# CONFIG_NET_VENDOR_DLINK is not set
105# CONFIG_NET_VENDOR_EMULEX is not set
106# CONFIG_NET_VENDOR_EXAR is not set
107# CONFIG_NET_VENDOR_HP is not set
108# CONFIG_NET_VENDOR_INTEL is not set
109# CONFIG_NET_VENDOR_MARVELL is not set
110# CONFIG_NET_VENDOR_MELLANOX is not set
111# CONFIG_NET_VENDOR_MICREL is not set
112# CONFIG_NET_VENDOR_MYRI is not set
113# CONFIG_NET_VENDOR_NATSEMI is not set
114# CONFIG_NET_VENDOR_NVIDIA is not set
115# CONFIG_NET_VENDOR_OKI is not set
116# CONFIG_NET_PACKET_ENGINE is not set
117# CONFIG_NET_VENDOR_QLOGIC is not set
118# CONFIG_NET_VENDOR_REALTEK is not set
119# CONFIG_NET_VENDOR_RDC is not set
120# CONFIG_NET_VENDOR_SEEQ is not set
121# CONFIG_NET_VENDOR_SILAN is not set
122# CONFIG_NET_VENDOR_SIS is not set
123# CONFIG_NET_VENDOR_SMSC is not set
124# CONFIG_NET_VENDOR_STMICRO is not set
125# CONFIG_NET_VENDOR_SUN is not set
126# CONFIG_NET_VENDOR_TEHUTI is not set
127# CONFIG_NET_VENDOR_TI is not set
128# CONFIG_NET_VENDOR_TOSHIBA is not set
129# CONFIG_NET_VENDOR_VIA is not set
130# CONFIG_WLAN is not set
131# CONFIG_VT is not set
132CONFIG_LEGACY_PTY_COUNT=16
133CONFIG_SERIAL_8250=y
134CONFIG_SERIAL_8250_CONSOLE=y
135CONFIG_HW_RANDOM=y
136# CONFIG_HWMON is not set
137CONFIG_VIDEO_OUTPUT_CONTROL=m
138CONFIG_FB=y
139CONFIG_FIRMWARE_EDID=y
140CONFIG_FB_MATROX=y
141CONFIG_FB_MATROX_G=y
142CONFIG_USB=y
143CONFIG_USB_EHCI_HCD=y
144# CONFIG_USB_EHCI_TT_NEWSCHED is not set
145CONFIG_USB_UHCI_HCD=y
146CONFIG_USB_STORAGE=y
147CONFIG_NEW_LEDS=y
148CONFIG_LEDS_CLASS=y
149CONFIG_LEDS_TRIGGERS=y
150CONFIG_LEDS_TRIGGER_TIMER=y
151CONFIG_LEDS_TRIGGER_IDE_DISK=y
152CONFIG_LEDS_TRIGGER_HEARTBEAT=y
153CONFIG_LEDS_TRIGGER_BACKLIGHT=y
154CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
155CONFIG_RTC_CLASS=y
156CONFIG_RTC_DRV_CMOS=y
157CONFIG_EXT2_FS=y
158CONFIG_EXT3_FS=y
159# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
160CONFIG_XFS_FS=y
161CONFIG_XFS_QUOTA=y
162CONFIG_XFS_POSIX_ACL=y
163CONFIG_QUOTA=y
164CONFIG_QFMT_V2=y
165CONFIG_MSDOS_FS=m
166CONFIG_VFAT_FS=m
167CONFIG_PROC_KCORE=y
168CONFIG_TMPFS=y
169CONFIG_NFS_FS=y
170CONFIG_ROOT_NFS=y
171CONFIG_CIFS=m
172CONFIG_CIFS_WEAK_PW_HASH=y
173CONFIG_CIFS_XATTR=y
174CONFIG_CIFS_POSIX=y
175CONFIG_NLS_CODEPAGE_437=m
176CONFIG_NLS_ISO8859_1=m
177# CONFIG_FTRACE is not set
178CONFIG_CRYPTO_NULL=m
179CONFIG_CRYPTO_PCBC=m
180CONFIG_CRYPTO_HMAC=y
181CONFIG_CRYPTO_MICHAEL_MIC=m
182CONFIG_CRYPTO_SHA512=m
183CONFIG_CRYPTO_TGR192=m
184CONFIG_CRYPTO_WP512=m
185CONFIG_CRYPTO_ANUBIS=m
186CONFIG_CRYPTO_BLOWFISH=m
187CONFIG_CRYPTO_CAST5=m
188CONFIG_CRYPTO_CAST6=m
189CONFIG_CRYPTO_KHAZAD=m
190CONFIG_CRYPTO_SERPENT=m
191CONFIG_CRYPTO_TEA=m
192CONFIG_CRYPTO_TWOFISH=m
193# CONFIG_CRYPTO_ANSI_CPRNG is not set
194# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig
index e3eec68d9132..0abe681c11a0 100644
--- a/arch/mips/configs/sead3_defconfig
+++ b/arch/mips/configs/sead3_defconfig
@@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y
2CONFIG_CPU_LITTLE_ENDIAN=y 2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y 3CONFIG_CPU_MIPS32_R2=y
4CONFIG_HZ_100=y 4CONFIG_HZ_100=y
5CONFIG_EXPERIMENTAL=y
6CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
8CONFIG_NO_HZ=y 7CONFIG_NO_HZ=y
@@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y
115CONFIG_NLS_ISO8859_15=y 114CONFIG_NLS_ISO8859_15=y
116CONFIG_NLS_UTF8=y 115CONFIG_NLS_UTF8=y
117# CONFIG_FTRACE is not set 116# CONFIG_FTRACE is not set
118CONFIG_CRYPTO=y
119CONFIG_CRYPTO_CBC=y 117CONFIG_CRYPTO_CBC=y
120CONFIG_CRYPTO_ECB=y 118CONFIG_CRYPTO_ECB=y
121CONFIG_CRYPTO_AES=y
122CONFIG_CRYPTO_ARC4=y 119CONFIG_CRYPTO_ARC4=y
123# CONFIG_CRYPTO_ANSI_CPRNG is not set 120# CONFIG_CRYPTO_ANSI_CPRNG is not set
124# CONFIG_CRYPTO_HW is not set 121# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
new file mode 100644
index 000000000000..2a0da5bf4b64
--- /dev/null
+++ b/arch/mips/configs/sead3micro_defconfig
@@ -0,0 +1,122 @@
1CONFIG_MIPS_SEAD3=y
2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y
4CONFIG_CPU_MICROMIPS=y
5CONFIG_HZ_100=y
6CONFIG_SYSVIPC=y
7CONFIG_POSIX_MQUEUE=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=15
13CONFIG_EMBEDDED=y
14CONFIG_SLAB=y
15CONFIG_PROFILING=y
16CONFIG_OPROFILE=y
17CONFIG_MODULES=y
18# CONFIG_BLK_DEV_BSG is not set
19# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
20CONFIG_NET=y
21CONFIG_PACKET=y
22CONFIG_UNIX=y
23CONFIG_INET=y
24CONFIG_IP_PNP=y
25CONFIG_IP_PNP_DHCP=y
26CONFIG_IP_PNP_BOOTP=y
27# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
28# CONFIG_INET_XFRM_MODE_TUNNEL is not set
29# CONFIG_INET_XFRM_MODE_BEET is not set
30# CONFIG_INET_LRO is not set
31# CONFIG_INET_DIAG is not set
32# CONFIG_IPV6 is not set
33# CONFIG_WIRELESS is not set
34CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
35CONFIG_MTD=y
36CONFIG_MTD_CHAR=y
37CONFIG_MTD_BLOCK=y
38CONFIG_MTD_CFI=y
39CONFIG_MTD_CFI_INTELEXT=y
40CONFIG_MTD_PHYSMAP=y
41CONFIG_MTD_UBI=y
42CONFIG_MTD_UBI_GLUEBI=y
43CONFIG_BLK_DEV_LOOP=y
44CONFIG_BLK_DEV_CRYPTOLOOP=m
45CONFIG_SCSI=y
46# CONFIG_SCSI_PROC_FS is not set
47CONFIG_BLK_DEV_SD=y
48CONFIG_CHR_DEV_SG=y
49# CONFIG_SCSI_LOWLEVEL is not set
50CONFIG_NETDEVICES=y
51CONFIG_SMSC911X=y
52# CONFIG_NET_VENDOR_WIZNET is not set
53CONFIG_MARVELL_PHY=y
54CONFIG_DAVICOM_PHY=y
55CONFIG_QSEMI_PHY=y
56CONFIG_LXT_PHY=y
57CONFIG_CICADA_PHY=y
58CONFIG_VITESSE_PHY=y
59CONFIG_SMSC_PHY=y
60CONFIG_BROADCOM_PHY=y
61CONFIG_ICPLUS_PHY=y
62# CONFIG_WLAN is not set
63# CONFIG_INPUT_MOUSEDEV is not set
64# CONFIG_INPUT_KEYBOARD is not set
65# CONFIG_INPUT_MOUSE is not set
66# CONFIG_SERIO is not set
67# CONFIG_CONSOLE_TRANSLATIONS is not set
68CONFIG_VT_HW_CONSOLE_BINDING=y
69CONFIG_LEGACY_PTY_COUNT=32
70CONFIG_SERIAL_8250=y
71CONFIG_SERIAL_8250_CONSOLE=y
72CONFIG_SERIAL_8250_NR_UARTS=2
73CONFIG_SERIAL_8250_RUNTIME_UARTS=2
74# CONFIG_HW_RANDOM is not set
75CONFIG_I2C=y
76# CONFIG_I2C_COMPAT is not set
77CONFIG_I2C_CHARDEV=y
78# CONFIG_I2C_HELPER_AUTO is not set
79CONFIG_SPI=y
80CONFIG_SENSORS_ADT7475=y
81CONFIG_BACKLIGHT_LCD_SUPPORT=y
82CONFIG_LCD_CLASS_DEVICE=y
83CONFIG_BACKLIGHT_CLASS_DEVICE=y
84# CONFIG_VGA_CONSOLE is not set
85CONFIG_USB=y
86CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
87CONFIG_USB_EHCI_HCD=y
88CONFIG_USB_EHCI_ROOT_HUB_TT=y
89CONFIG_USB_STORAGE=y
90CONFIG_MMC=y
91CONFIG_MMC_DEBUG=y
92CONFIG_MMC_SPI=y
93CONFIG_NEW_LEDS=y
94CONFIG_LEDS_CLASS=y
95CONFIG_LEDS_TRIGGERS=y
96CONFIG_LEDS_TRIGGER_HEARTBEAT=y
97CONFIG_RTC_CLASS=y
98CONFIG_RTC_DRV_M41T80=y
99CONFIG_EXT3_FS=y
100# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
101CONFIG_XFS_FS=y
102CONFIG_XFS_QUOTA=y
103CONFIG_XFS_POSIX_ACL=y
104CONFIG_QUOTA=y
105# CONFIG_PRINT_QUOTA_WARNING is not set
106CONFIG_MSDOS_FS=m
107CONFIG_VFAT_FS=m
108CONFIG_TMPFS=y
109CONFIG_JFFS2_FS=y
110CONFIG_NFS_FS=y
111CONFIG_ROOT_NFS=y
112CONFIG_NLS_CODEPAGE_437=y
113CONFIG_NLS_ASCII=y
114CONFIG_NLS_ISO8859_1=y
115CONFIG_NLS_ISO8859_15=y
116CONFIG_NLS_UTF8=y
117# CONFIG_FTRACE is not set
118CONFIG_CRYPTO_CBC=y
119CONFIG_CRYPTO_ECB=y
120CONFIG_CRYPTO_ARC4=y
121# CONFIG_CRYPTO_ANSI_CPRNG is not set
122# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/fw/lib/Makefile b/arch/mips/fw/lib/Makefile
index 84befc968fc4..529150516777 100644
--- a/arch/mips/fw/lib/Makefile
+++ b/arch/mips/fw/lib/Makefile
@@ -2,4 +2,6 @@
2# Makefile for generic prom monitor library routines under Linux. 2# Makefile for generic prom monitor library routines under Linux.
3# 3#
4 4
5lib-y += cmdline.o
6
5lib-$(CONFIG_64BIT) += call_o32.o 7lib-$(CONFIG_64BIT) += call_o32.o
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644
index 000000000000..ffd0345780ae
--- /dev/null
+++ b/arch/mips/fw/lib/cmdline.c
@@ -0,0 +1,101 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/string.h>
11
12#include <asm/addrspace.h>
13#include <asm/fw/fw.h>
14
15int fw_argc;
16int *_fw_argv;
17int *_fw_envp;
18
19void __init fw_init_cmdline(void)
20{
21 int i;
22
23 /* Validate command line parameters. */
24 if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
25 fw_argc = 0;
26 _fw_argv = NULL;
27 } else {
28 fw_argc = (fw_arg0 & 0x0000ffff);
29 _fw_argv = (int *)fw_arg1;
30 }
31
32 /* Validate environment pointer. */
33 if (fw_arg2 < CKSEG0)
34 _fw_envp = NULL;
35 else
36 _fw_envp = (int *)fw_arg2;
37
38 for (i = 1; i < fw_argc; i++) {
39 strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
40 if (i < (fw_argc - 1))
41 strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
42 }
43}
44
45char * __init fw_getcmdline(void)
46{
47 return &(arcs_cmdline[0]);
48}
49
50char *fw_getenv(char *envname)
51{
52 char *result = NULL;
53
54 if (_fw_envp != NULL) {
55 /*
56 * Return a pointer to the given environment variable.
57 * YAMON uses "name", "value" pairs, while U-Boot uses
58 * "name=value".
59 */
60 int i, yamon, index = 0;
61
62 yamon = (strchr(fw_envp(index), '=') == NULL);
63 i = strlen(envname);
64
65 while (fw_envp(index)) {
66 if (strncmp(envname, fw_envp(index), i) == 0) {
67 if (yamon) {
68 result = fw_envp(index + 1);
69 break;
70 } else if (fw_envp(index)[i] == '=') {
71 result = (fw_envp(index + 1) + i);
72 break;
73 }
74 }
75
76 /* Increment array index. */
77 if (yamon)
78 index += 2;
79 else
80 index += 1;
81 }
82 }
83
84 return result;
85}
86
87unsigned long fw_getenvl(char *envname)
88{
89 unsigned long envl = 0UL;
90 char *str;
91 long val;
92 int tmp;
93
94 str = fw_getenv(envname);
95 if (str) {
96 tmp = kstrtol(str, 0, &val);
97 envl = (unsigned long)val;
98 }
99
100 return envl;
101}
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 164a21e65b42..879691d194af 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -296,6 +296,7 @@ symbol = value
296#define LONG_SUBU subu 296#define LONG_SUBU subu
297#define LONG_L lw 297#define LONG_L lw
298#define LONG_S sw 298#define LONG_S sw
299#define LONG_SP swp
299#define LONG_SLL sll 300#define LONG_SLL sll
300#define LONG_SLLV sllv 301#define LONG_SLLV sllv
301#define LONG_SRL srl 302#define LONG_SRL srl
@@ -318,6 +319,7 @@ symbol = value
318#define LONG_SUBU dsubu 319#define LONG_SUBU dsubu
319#define LONG_L ld 320#define LONG_L ld
320#define LONG_S sd 321#define LONG_S sd
322#define LONG_SP sdp
321#define LONG_SLL dsll 323#define LONG_SLL dsll
322#define LONG_SLLV dsllv 324#define LONG_SLLV dsllv
323#define LONG_SRL dsrl 325#define LONG_SRL dsrl
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index b71dd5b16085..4d2cdea5aa37 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -104,6 +104,7 @@ struct boot_mem_map {
104extern struct boot_mem_map boot_mem_map; 104extern struct boot_mem_map boot_mem_map;
105 105
106extern void add_memory_region(phys_t start, phys_t size, long type); 106extern void add_memory_region(phys_t start, phys_t size, long type);
107extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max);
107 108
108extern void prom_init(void); 109extern void prom_init(void);
109extern void prom_free_prom_memory(void); 110extern void prom_free_prom_memory(void);
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index 888766ae1f85..e28a3e0eb3cb 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -11,6 +11,14 @@
11#include <asm/ptrace.h> 11#include <asm/ptrace.h>
12#include <asm/inst.h> 12#include <asm/inst.h>
13 13
14extern int __isa_exception_epc(struct pt_regs *regs);
15extern int __compute_return_epc(struct pt_regs *regs);
16extern int __compute_return_epc_for_insn(struct pt_regs *regs,
17 union mips_instruction insn);
18extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
19extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
20
21
14static inline int delay_slot(struct pt_regs *regs) 22static inline int delay_slot(struct pt_regs *regs)
15{ 23{
16 return regs->cp0_cause & CAUSEF_BD; 24 return regs->cp0_cause & CAUSEF_BD;
@@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs)
18 26
19static inline unsigned long exception_epc(struct pt_regs *regs) 27static inline unsigned long exception_epc(struct pt_regs *regs)
20{ 28{
21 if (!delay_slot(regs)) 29 if (likely(!delay_slot(regs)))
22 return regs->cp0_epc; 30 return regs->cp0_epc;
23 31
32 if (get_isa16_mode(regs->cp0_epc))
33 return __isa_exception_epc(regs);
34
24 return regs->cp0_epc + 4; 35 return regs->cp0_epc + 4;
25} 36}
26 37
27#define BRANCH_LIKELY_TAKEN 0x0001 38#define BRANCH_LIKELY_TAKEN 0x0001
28 39
29extern int __compute_return_epc(struct pt_regs *regs);
30extern int __compute_return_epc_for_insn(struct pt_regs *regs,
31 union mips_instruction insn);
32
33static inline int compute_return_epc(struct pt_regs *regs) 40static inline int compute_return_epc(struct pt_regs *regs)
34{ 41{
42 if (get_isa16_mode(regs->cp0_epc)) {
43 if (cpu_has_mmips)
44 return __microMIPS_compute_return_epc(regs);
45 if (cpu_has_mips16)
46 return __MIPS16e_compute_return_epc(regs);
47 return regs->cp0_epc;
48 }
49
35 if (!delay_slot(regs)) { 50 if (!delay_slot(regs)) {
36 regs->cp0_epc += 4; 51 regs->cp0_epc += 4;
37 return 0; 52 return 0;
@@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs)
40 return __compute_return_epc(regs); 55 return __compute_return_epc(regs);
41} 56}
42 57
58static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
59 union mips16e_instruction *inst)
60{
61 if (likely(!delay_slot(regs))) {
62 if (inst->ri.opcode == MIPS16e_extend_op) {
63 regs->cp0_epc += 4;
64 return 0;
65 }
66 regs->cp0_epc += 2;
67 return 0;
68 }
69
70 return __MIPS16e_compute_return_epc(regs);
71}
72
43#endif /* _ASM_BRANCH_H */ 73#endif /* _ASM_BRANCH_H */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1a57e8b4d092..e5ec8fcd8afa 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -113,6 +113,9 @@
113#ifndef cpu_has_pindexed_dcache 113#ifndef cpu_has_pindexed_dcache
114#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) 114#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
115#endif 115#endif
116#ifndef cpu_has_local_ebase
117#define cpu_has_local_ebase 1
118#endif
116 119
117/* 120/*
118 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors 121 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644
index 000000000000..242cbb3ca582
--- /dev/null
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -0,0 +1,15 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
7 *
8 */
9#ifndef __ASM_DMA_COHERENCE_H
10#define __ASM_DMA_COHERENCE_H
11
12extern int coherentio;
13extern int hw_coherentio;
14
15#endif
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index f8fc74b6cb47..84238c574d5e 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -2,6 +2,7 @@
2#define _ASM_DMA_MAPPING_H 2#define _ASM_DMA_MAPPING_H
3 3
4#include <asm/scatterlist.h> 4#include <asm/scatterlist.h>
5#include <asm/dma-coherence.h>
5#include <asm/cache.h> 6#include <asm/cache.h>
6#include <asm-generic/dma-coherent.h> 7#include <asm-generic/dma-coherent.h>
7 8
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3b4092705567..2abb587d5ab4 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -54,6 +54,12 @@ do { \
54extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, 54extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
55 unsigned long cpc); 55 unsigned long cpc);
56extern int do_dsemulret(struct pt_regs *xcp); 56extern int do_dsemulret(struct pt_regs *xcp);
57extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
58 struct mips_fpu_struct *ctx, int has_fpu,
59 void *__user *fault_addr);
60int process_fpemu_return(int sig, void __user *fault_addr);
61int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
62 unsigned long *contpc);
57 63
58/* 64/*
59 * Instruction inserted following the badinst to further tag the sequence 65 * Instruction inserted following the badinst to further tag the sequence
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644
index 000000000000..d6c50a7e9ede
--- /dev/null
+++ b/arch/mips/include/asm/fw/fw.h
@@ -0,0 +1,47 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc.
7 */
8#ifndef __ASM_FW_H_
9#define __ASM_FW_H_
10
11#include <asm/bootinfo.h> /* For cleaner code... */
12
13enum fw_memtypes {
14 fw_dontuse,
15 fw_code,
16 fw_free,
17};
18
19typedef struct {
20 unsigned long base; /* Within KSEG0 */
21 unsigned int size; /* bytes */
22 enum fw_memtypes type; /* fw_memtypes */
23} fw_memblock_t;
24
25/* Maximum number of memory block descriptors. */
26#define FW_MAX_MEMBLOCKS 32
27
28extern int fw_argc;
29extern int *_fw_argv;
30extern int *_fw_envp;
31
32/*
33 * Most firmware like YAMON, PMON, etc. pass arguments and environment
34 * variables as 32-bit pointers. These take care of sign extension.
35 */
36#define fw_argv(index) ((char *)(long)_fw_argv[(index)])
37#define fw_envp(index) ((char *)(long)_fw_envp[(index)])
38
39extern void fw_init_cmdline(void);
40extern char *fw_getcmdline(void);
41extern fw_memblock_t *fw_getmdesc(void);
42extern void fw_meminit(void);
43extern char *fw_getenv(char *name);
44extern unsigned long fw_getenvl(char *name);
45extern void fw_init_early_console(char port);
46
47#endif /* __ASM_FW_H_ */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index bdc9786ab5a7..7153b32de18e 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -202,7 +202,7 @@
202#define GIC_VPE_WD_COUNT0_OFS 0x0094 202#define GIC_VPE_WD_COUNT0_OFS 0x0094
203#define GIC_VPE_WD_INITIAL0_OFS 0x0098 203#define GIC_VPE_WD_INITIAL0_OFS 0x0098
204#define GIC_VPE_COMPARE_LO_OFS 0x00a0 204#define GIC_VPE_COMPARE_LO_OFS 0x00a0
205#define GIC_VPE_COMPARE_HI 0x00a4 205#define GIC_VPE_COMPARE_HI_OFS 0x00a4
206 206
207#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 207#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
208#define GIC_VPE_EIC_SS(intr) \ 208#define GIC_VPE_EIC_SS(intr) \
@@ -359,7 +359,11 @@ struct gic_shared_intr_map {
359/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ 359/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
360#define GIC_PIN_TO_VEC_OFFSET (1) 360#define GIC_PIN_TO_VEC_OFFSET (1)
361 361
362extern int gic_present; 362#include <linux/clocksource.h>
363#include <linux/irq.h>
364
365extern unsigned int gic_present;
366extern unsigned int gic_frequency;
363extern unsigned long _gic_base; 367extern unsigned long _gic_base;
364extern unsigned int gic_irq_base; 368extern unsigned int gic_irq_base;
365extern unsigned int gic_irq_flags[]; 369extern unsigned int gic_irq_flags[];
@@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[];
368extern void gic_init(unsigned long gic_base_addr, 372extern void gic_init(unsigned long gic_base_addr,
369 unsigned long gic_addrspace_size, struct gic_intr_map *intrmap, 373 unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
370 unsigned int intrmap_size, unsigned int irqbase); 374 unsigned int intrmap_size, unsigned int irqbase);
371
372extern void gic_clocksource_init(unsigned int); 375extern void gic_clocksource_init(unsigned int);
373extern unsigned int gic_get_int(void); 376extern unsigned int gic_compare_int (void);
377extern cycle_t gic_read_count(void);
378extern cycle_t gic_read_compare(void);
379extern void gic_write_compare(cycle_t cnt);
374extern void gic_send_ipi(unsigned int intr); 380extern void gic_send_ipi(unsigned int intr);
375extern unsigned int plat_ipi_call_int_xlate(unsigned int); 381extern unsigned int plat_ipi_call_int_xlate(unsigned int);
376extern unsigned int plat_ipi_resched_int_xlate(unsigned int); 382extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
377extern void gic_bind_eic_interrupt(int irq, int set); 383extern void gic_bind_eic_interrupt(int irq, int set);
378extern unsigned int gic_get_timer_pending(void); 384extern unsigned int gic_get_timer_pending(void);
385extern unsigned int gic_get_int(void);
379extern void gic_enable_interrupt(int irq_vec); 386extern void gic_enable_interrupt(int irq_vec);
380extern void gic_disable_interrupt(int irq_vec); 387extern void gic_disable_interrupt(int irq_vec);
381extern void gic_irq_ack(struct irq_data *d); 388extern void gic_irq_ack(struct irq_data *d);
382extern void gic_finish_irq(struct irq_data *d); 389extern void gic_finish_irq(struct irq_data *d);
383extern void gic_platform_init(int irqs, struct irq_chip *irq_controller); 390extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
384
385#endif /* _ASM_GICREGS_H */ 391#endif /* _ASM_GICREGS_H */
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 44d6a5bde4a1..e3ee92d4dbe7 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -10,34 +10,13 @@
10#ifndef _ASM_HAZARDS_H 10#ifndef _ASM_HAZARDS_H
11#define _ASM_HAZARDS_H 11#define _ASM_HAZARDS_H
12 12
13#ifdef __ASSEMBLY__ 13#include <linux/stringify.h>
14#define ASMMACRO(name, code...) .macro name; code; .endm
15#else
16
17#include <asm/cpu-features.h>
18
19#define ASMMACRO(name, code...) \
20__asm__(".macro " #name "; " #code "; .endm"); \
21 \
22static inline void name(void) \
23{ \
24 __asm__ __volatile__ (#name); \
25}
26
27/*
28 * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
29 */
30extern void mips_ihb(void);
31
32#endif
33 14
34ASMMACRO(_ssnop, 15#define ___ssnop \
35 sll $0, $0, 1 16 sll $0, $0, 1
36 )
37 17
38ASMMACRO(_ehb, 18#define ___ehb \
39 sll $0, $0, 3 19 sll $0, $0, 3
40 )
41 20
42/* 21/*
43 * TLB hazards 22 * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
48 * MIPSR2 defines ehb for hazard avoidance 27 * MIPSR2 defines ehb for hazard avoidance
49 */ 28 */
50 29
51ASMMACRO(mtc0_tlbw_hazard, 30#define __mtc0_tlbw_hazard \
52 _ehb 31 ___ehb
53 ) 32
54ASMMACRO(tlbw_use_hazard, 33#define __tlbw_use_hazard \
55 _ehb 34 ___ehb
56 ) 35
57ASMMACRO(tlb_probe_hazard, 36#define __tlb_probe_hazard \
58 _ehb 37 ___ehb
59 ) 38
60ASMMACRO(irq_enable_hazard, 39#define __irq_enable_hazard \
61 _ehb 40 ___ehb
62 ) 41
63ASMMACRO(irq_disable_hazard, 42#define __irq_disable_hazard \
64 _ehb 43 ___ehb
65 ) 44
66ASMMACRO(back_to_back_c0_hazard, 45#define __back_to_back_c0_hazard \
67 _ehb 46 ___ehb
68 ) 47
69/* 48/*
70 * gcc has a tradition of misscompiling the previous construct using the 49 * gcc has a tradition of misscompiling the previous construct using the
71 * address of a label as argument to inline assembler. Gas otoh has the 50 * address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do { \
94 * These are slightly complicated by the fact that we guarantee R1 kernels to 73 * These are slightly complicated by the fact that we guarantee R1 kernels to
95 * run fine on R2 processors. 74 * run fine on R2 processors.
96 */ 75 */
97ASMMACRO(mtc0_tlbw_hazard, 76
98 _ssnop; _ssnop; _ehb 77#define __mtc0_tlbw_hazard \
99 ) 78 ___ssnop; \
100ASMMACRO(tlbw_use_hazard, 79 ___ssnop; \
101 _ssnop; _ssnop; _ssnop; _ehb 80 ___ehb
102 ) 81
103ASMMACRO(tlb_probe_hazard, 82#define __tlbw_use_hazard \
104 _ssnop; _ssnop; _ssnop; _ehb 83 ___ssnop; \
105 ) 84 ___ssnop; \
106ASMMACRO(irq_enable_hazard, 85 ___ssnop; \
107 _ssnop; _ssnop; _ssnop; _ehb 86 ___ehb
108 ) 87
109ASMMACRO(irq_disable_hazard, 88#define __tlb_probe_hazard \
110 _ssnop; _ssnop; _ssnop; _ehb 89 ___ssnop; \
111 ) 90 ___ssnop; \
112ASMMACRO(back_to_back_c0_hazard, 91 ___ssnop; \
113 _ssnop; _ssnop; _ssnop; _ehb 92 ___ehb
114 ) 93
94#define __irq_enable_hazard \
95 ___ssnop; \
96 ___ssnop; \
97 ___ssnop; \
98 ___ehb
99
100#define __irq_disable_hazard \
101 ___ssnop; \
102 ___ssnop; \
103 ___ssnop; \
104 ___ehb
105
106#define __back_to_back_c0_hazard \
107 ___ssnop; \
108 ___ssnop; \
109 ___ssnop; \
110 ___ehb
111
115/* 112/*
116 * gcc has a tradition of misscompiling the previous construct using the 113 * gcc has a tradition of misscompiling the previous construct using the
117 * address of a label as argument to inline assembler. Gas otoh has the 114 * address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do { \
147 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 144 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
148 */ 145 */
149 146
150ASMMACRO(mtc0_tlbw_hazard, 147#define __mtc0_tlbw_hazard
151 ) 148
152ASMMACRO(tlbw_use_hazard, 149#define __tlbw_use_hazard
153 ) 150
154ASMMACRO(tlb_probe_hazard, 151#define __tlb_probe_hazard
155 ) 152
156ASMMACRO(irq_enable_hazard, 153#define __irq_enable_hazard
157 ) 154
158ASMMACRO(irq_disable_hazard, 155#define __irq_disable_hazard
159 ) 156
160ASMMACRO(back_to_back_c0_hazard, 157#define __back_to_back_c0_hazard
161 ) 158
162#define instruction_hazard() do { } while (0) 159#define instruction_hazard() do { } while (0)
163 160
164#elif defined(CONFIG_CPU_SB1) 161#elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
166/* 163/*
167 * Mostly like R4000 for historic reasons 164 * Mostly like R4000 for historic reasons
168 */ 165 */
169ASMMACRO(mtc0_tlbw_hazard, 166#define __mtc0_tlbw_hazard
170 ) 167
171ASMMACRO(tlbw_use_hazard, 168#define __tlbw_use_hazard
172 ) 169
173ASMMACRO(tlb_probe_hazard, 170#define __tlb_probe_hazard
174 ) 171
175ASMMACRO(irq_enable_hazard, 172#define __irq_enable_hazard
176 ) 173
177ASMMACRO(irq_disable_hazard, 174#define __irq_disable_hazard \
178 _ssnop; _ssnop; _ssnop 175 ___ssnop; \
179 ) 176 ___ssnop; \
180ASMMACRO(back_to_back_c0_hazard, 177 ___ssnop
181 ) 178
179#define __back_to_back_c0_hazard
180
182#define instruction_hazard() do { } while (0) 181#define instruction_hazard() do { } while (0)
183 182
184#else 183#else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
192 * hazard so this is nice trick to have an optimal code for a range of 191 * hazard so this is nice trick to have an optimal code for a range of
193 * processors. 192 * processors.
194 */ 193 */
195ASMMACRO(mtc0_tlbw_hazard, 194#define __mtc0_tlbw_hazard \
196 nop; nop 195 nop; \
197 ) 196 nop
198ASMMACRO(tlbw_use_hazard, 197
199 nop; nop; nop 198#define __tlbw_use_hazard \
200 ) 199 nop; \
201ASMMACRO(tlb_probe_hazard, 200 nop; \
202 nop; nop; nop 201 nop
203 ) 202
204ASMMACRO(irq_enable_hazard, 203#define __tlb_probe_hazard \
205 _ssnop; _ssnop; _ssnop; 204 nop; \
206 ) 205 nop; \
207ASMMACRO(irq_disable_hazard, 206 nop
208 nop; nop; nop 207
209 ) 208#define __irq_enable_hazard \
210ASMMACRO(back_to_back_c0_hazard, 209 ___ssnop; \
211 _ssnop; _ssnop; _ssnop; 210 ___ssnop; \
212 ) 211 ___ssnop
212
213#define __irq_disable_hazard \
214 nop; \
215 nop; \
216 nop
217
218#define __back_to_back_c0_hazard \
219 ___ssnop; \
220 ___ssnop; \
221 ___ssnop
222
213#define instruction_hazard() do { } while (0) 223#define instruction_hazard() do { } while (0)
214 224
215#endif 225#endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
218/* FPU hazards */ 228/* FPU hazards */
219 229
220#if defined(CONFIG_CPU_SB1) 230#if defined(CONFIG_CPU_SB1)
221ASMMACRO(enable_fpu_hazard, 231
222 .set push; 232#define __enable_fpu_hazard \
223 .set mips64; 233 .set push; \
224 .set noreorder; 234 .set mips64; \
225 _ssnop; 235 .set noreorder; \
226 bnezl $0, .+4; 236 ___ssnop; \
227 _ssnop; 237 bnezl $0, .+4; \
228 .set pop 238 ___ssnop; \
229) 239 .set pop
230ASMMACRO(disable_fpu_hazard, 240
231) 241#define __disable_fpu_hazard
232 242
233#elif defined(CONFIG_CPU_MIPSR2) 243#elif defined(CONFIG_CPU_MIPSR2)
234ASMMACRO(enable_fpu_hazard, 244
235 _ehb 245#define __enable_fpu_hazard \
236) 246 ___ehb
237ASMMACRO(disable_fpu_hazard, 247
238 _ehb 248#define __disable_fpu_hazard \
239) 249 ___ehb
250
240#else 251#else
241ASMMACRO(enable_fpu_hazard, 252
242 nop; nop; nop; nop 253#define __enable_fpu_hazard \
243) 254 nop; \
244ASMMACRO(disable_fpu_hazard, 255 nop; \
245 _ehb 256 nop; \
246) 257 nop
258
259#define __disable_fpu_hazard \
260 ___ehb
261
247#endif 262#endif
248 263
264#ifdef __ASSEMBLY__
265
266#define _ssnop ___ssnop
267#define _ehb ___ehb
268#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
269#define tlbw_use_hazard __tlbw_use_hazard
270#define tlb_probe_hazard __tlb_probe_hazard
271#define irq_enable_hazard __irq_enable_hazard
272#define irq_disable_hazard __irq_disable_hazard
273#define back_to_back_c0_hazard __back_to_back_c0_hazard
274#define enable_fpu_hazard __enable_fpu_hazard
275#define disable_fpu_hazard __disable_fpu_hazard
276
277#else
278
279#define _ssnop() \
280do { \
281 __asm__ __volatile__( \
282 __stringify(___ssnop) \
283 ); \
284} while (0)
285
286#define _ehb() \
287do { \
288 __asm__ __volatile__( \
289 __stringify(___ehb) \
290 ); \
291} while (0)
292
293
294#define mtc0_tlbw_hazard() \
295do { \
296 __asm__ __volatile__( \
297 __stringify(__mtc0_tlbw_hazard) \
298 ); \
299} while (0)
300
301
302#define tlbw_use_hazard() \
303do { \
304 __asm__ __volatile__( \
305 __stringify(__tlbw_use_hazard) \
306 ); \
307} while (0)
308
309
310#define tlb_probe_hazard() \
311do { \
312 __asm__ __volatile__( \
313 __stringify(__tlb_probe_hazard) \
314 ); \
315} while (0)
316
317
318#define irq_enable_hazard() \
319do { \
320 __asm__ __volatile__( \
321 __stringify(__irq_enable_hazard) \
322 ); \
323} while (0)
324
325
326#define irq_disable_hazard() \
327do { \
328 __asm__ __volatile__( \
329 __stringify(__irq_disable_hazard) \
330 ); \
331} while (0)
332
333
334#define back_to_back_c0_hazard() \
335do { \
336 __asm__ __volatile__( \
337 __stringify(__back_to_back_c0_hazard) \
338 ); \
339} while (0)
340
341
342#define enable_fpu_hazard() \
343do { \
344 __asm__ __volatile__( \
345 __stringify(__enable_fpu_hazard) \
346 ); \
347} while (0)
348
349
350#define disable_fpu_hazard() \
351do { \
352 __asm__ __volatile__( \
353 __stringify(__disable_fpu_hazard) \
354 ); \
355} while (0)
356
357/*
358 * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
359 */
360extern void mips_ihb(void);
361
362#endif /* __ASSEMBLY__ */
363
249#endif /* _ASM_HAZARDS_H */ 364#endif /* _ASM_HAZARDS_H */
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index f1eadf764071..22912f78401c 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -73,4 +73,16 @@
73 73
74typedef unsigned int mips_instruction; 74typedef unsigned int mips_instruction;
75 75
76/* microMIPS instruction decode structure. Do NOT export!!! */
77struct mm_decoded_insn {
78 mips_instruction insn;
79 mips_instruction next_insn;
80 int pc_inc;
81 int next_pc_inc;
82 int micro_mips_mode;
83};
84
85/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
86extern const int reg16to32[];
87
76#endif /* _ASM_INST_H */ 88#endif /* _ASM_INST_H */
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c789d7..45c00951888b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
14#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/stringify.h>
17#include <asm/hazards.h> 18#include <asm/hazards.h>
18 19
19#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) 20#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
20 21
21__asm__( 22static inline void arch_local_irq_disable(void)
22 " .macro arch_local_irq_disable\n" 23{
24 __asm__ __volatile__(
23 " .set push \n" 25 " .set push \n"
24 " .set noat \n" 26 " .set noat \n"
25 " di \n" 27 " di \n"
26 " irq_disable_hazard \n" 28 " " __stringify(__irq_disable_hazard) " \n"
27 " .set pop \n" 29 " .set pop \n"
28 " .endm \n"); 30 : /* no outputs */
29 31 : /* no inputs */
30static inline void arch_local_irq_disable(void) 32 : "memory");
31{
32 __asm__ __volatile__(
33 "arch_local_irq_disable"
34 : /* no outputs */
35 : /* no inputs */
36 : "memory");
37} 33}
38 34
35static inline unsigned long arch_local_irq_save(void)
36{
37 unsigned long flags;
39 38
40__asm__( 39 asm __volatile__(
41 " .macro arch_local_irq_save result \n"
42 " .set push \n" 40 " .set push \n"
43 " .set reorder \n" 41 " .set reorder \n"
44 " .set noat \n" 42 " .set noat \n"
45 " di \\result \n" 43 " di %[flags] \n"
46 " andi \\result, 1 \n" 44 " andi %[flags], 1 \n"
47 " irq_disable_hazard \n" 45 " " __stringify(__irq_disable_hazard) " \n"
48 " .set pop \n" 46 " .set pop \n"
49 " .endm \n"); 47 : [flags] "=r" (flags)
48 : /* no inputs */
49 : "memory");
50 50
51static inline unsigned long arch_local_irq_save(void)
52{
53 unsigned long flags;
54 asm volatile("arch_local_irq_save\t%0"
55 : "=r" (flags)
56 : /* no inputs */
57 : "memory");
58 return flags; 51 return flags;
59} 52}
60 53
54static inline void arch_local_irq_restore(unsigned long flags)
55{
56 unsigned long __tmp1;
61 57
62__asm__( 58 __asm__ __volatile__(
63 " .macro arch_local_irq_restore flags \n"
64 " .set push \n" 59 " .set push \n"
65 " .set noreorder \n" 60 " .set noreorder \n"
66 " .set noat \n" 61 " .set noat \n"
@@ -69,7 +64,7 @@ __asm__(
69 * Slow, but doesn't suffer from a relatively unlikely race 64 * Slow, but doesn't suffer from a relatively unlikely race
70 * condition we're having since days 1. 65 * condition we're having since days 1.
71 */ 66 */
72 " beqz \\flags, 1f \n" 67 " beqz %[flags], 1f \n"
73 " di \n" 68 " di \n"
74 " ei \n" 69 " ei \n"
75 "1: \n" 70 "1: \n"
@@ -78,33 +73,44 @@ __asm__(
78 * Fast, dangerous. Life is fun, life is good. 73 * Fast, dangerous. Life is fun, life is good.
79 */ 74 */
80 " mfc0 $1, $12 \n" 75 " mfc0 $1, $12 \n"
81 " ins $1, \\flags, 0, 1 \n" 76 " ins $1, %[flags], 0, 1 \n"
82 " mtc0 $1, $12 \n" 77 " mtc0 $1, $12 \n"
83#endif 78#endif
84 " irq_disable_hazard \n" 79 " " __stringify(__irq_disable_hazard) " \n"
85 " .set pop \n" 80 " .set pop \n"
86 " .endm \n"); 81 : [flags] "=r" (__tmp1)
87 82 : "0" (flags)
88static inline void arch_local_irq_restore(unsigned long flags) 83 : "memory");
89{
90 unsigned long __tmp1;
91
92 __asm__ __volatile__(
93 "arch_local_irq_restore\t%0"
94 : "=r" (__tmp1)
95 : "0" (flags)
96 : "memory");
97} 84}
98 85
99static inline void __arch_local_irq_restore(unsigned long flags) 86static inline void __arch_local_irq_restore(unsigned long flags)
100{ 87{
101 unsigned long __tmp1;
102
103 __asm__ __volatile__( 88 __asm__ __volatile__(
104 "arch_local_irq_restore\t%0" 89 " .set push \n"
105 : "=r" (__tmp1) 90 " .set noreorder \n"
106 : "0" (flags) 91 " .set noat \n"
107 : "memory"); 92#if defined(CONFIG_IRQ_CPU)
93 /*
94 * Slow, but doesn't suffer from a relatively unlikely race
95 * condition we're having since days 1.
96 */
97 " beqz %[flags], 1f \n"
98 " di \n"
99 " ei \n"
100 "1: \n"
101#else
102 /*
103 * Fast, dangerous. Life is fun, life is good.
104 */
105 " mfc0 $1, $12 \n"
106 " ins $1, %[flags], 0, 1 \n"
107 " mtc0 $1, $12 \n"
108#endif
109 " " __stringify(__irq_disable_hazard) " \n"
110 " .set pop \n"
111 : [flags] "=r" (flags)
112 : "0" (flags)
113 : "memory");
108} 114}
109#else 115#else
110/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ 116/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
115#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ 121#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
116 122
117 123
118__asm__( 124extern void smtc_ipi_replay(void);
119 " .macro arch_local_irq_enable \n" 125
126static inline void arch_local_irq_enable(void)
127{
128#ifdef CONFIG_MIPS_MT_SMTC
129 /*
130 * SMTC kernel needs to do a software replay of queued
131 * IPIs, at the cost of call overhead on each local_irq_enable()
132 */
133 smtc_ipi_replay();
134#endif
135 __asm__ __volatile__(
120 " .set push \n" 136 " .set push \n"
121 " .set reorder \n" 137 " .set reorder \n"
122 " .set noat \n" 138 " .set noat \n"
@@ -133,45 +149,28 @@ __asm__(
133 " xori $1,0x1e \n" 149 " xori $1,0x1e \n"
134 " mtc0 $1,$12 \n" 150 " mtc0 $1,$12 \n"
135#endif 151#endif
136 " irq_enable_hazard \n" 152 " " __stringify(__irq_enable_hazard) " \n"
137 " .set pop \n" 153 " .set pop \n"
138 " .endm"); 154 : /* no outputs */
139 155 : /* no inputs */
140extern void smtc_ipi_replay(void); 156 : "memory");
141
142static inline void arch_local_irq_enable(void)
143{
144#ifdef CONFIG_MIPS_MT_SMTC
145 /*
146 * SMTC kernel needs to do a software replay of queued
147 * IPIs, at the cost of call overhead on each local_irq_enable()
148 */
149 smtc_ipi_replay();
150#endif
151 __asm__ __volatile__(
152 "arch_local_irq_enable"
153 : /* no outputs */
154 : /* no inputs */
155 : "memory");
156} 157}
157 158
159static inline unsigned long arch_local_save_flags(void)
160{
161 unsigned long flags;
158 162
159__asm__( 163 asm __volatile__(
160 " .macro arch_local_save_flags flags \n"
161 " .set push \n" 164 " .set push \n"
162 " .set reorder \n" 165 " .set reorder \n"
163#ifdef CONFIG_MIPS_MT_SMTC 166#ifdef CONFIG_MIPS_MT_SMTC
164 " mfc0 \\flags, $2, 1 \n" 167 " mfc0 %[flags], $2, 1 \n"
165#else 168#else
166 " mfc0 \\flags, $12 \n" 169 " mfc0 %[flags], $12 \n"
167#endif 170#endif
168 " .set pop \n" 171 " .set pop \n"
169 " .endm \n"); 172 : [flags] "=r" (flags));
170 173
171static inline unsigned long arch_local_save_flags(void)
172{
173 unsigned long flags;
174 asm volatile("arch_local_save_flags %0" : "=r" (flags));
175 return flags; 174 return flags;
176} 175}
177 176
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644
index 000000000000..85789eacbf18
--- /dev/null
+++ b/arch/mips/include/asm/kvm.h
@@ -0,0 +1,55 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __LINUX_KVM_MIPS_H
11#define __LINUX_KVM_MIPS_H
12
13#include <linux/types.h>
14
15#define __KVM_MIPS
16
17#define N_MIPS_COPROC_REGS 32
18#define N_MIPS_COPROC_SEL 8
19
20/* for KVM_GET_REGS and KVM_SET_REGS */
21struct kvm_regs {
22 __u32 gprs[32];
23 __u32 hi;
24 __u32 lo;
25 __u32 pc;
26
27 __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
28};
29
30/* for KVM_GET_SREGS and KVM_SET_SREGS */
31struct kvm_sregs {
32};
33
34/* for KVM_GET_FPU and KVM_SET_FPU */
35struct kvm_fpu {
36};
37
38struct kvm_debug_exit_arch {
39};
40
41/* for KVM_SET_GUEST_DEBUG */
42struct kvm_guest_debug_arch {
43};
44
45struct kvm_mips_interrupt {
46 /* in */
47 __u32 cpu;
48 __u32 irq;
49};
50
51/* definition of registers in kvm_run */
52struct kvm_sync_regs {
53};
54
55#endif /* __LINUX_KVM_MIPS_H */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644
index 000000000000..e68781e18387
--- /dev/null
+++ b/arch/mips/include/asm/kvm_host.h
@@ -0,0 +1,667 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __MIPS_KVM_HOST_H__
11#define __MIPS_KVM_HOST_H__
12
13#include <linux/mutex.h>
14#include <linux/hrtimer.h>
15#include <linux/interrupt.h>
16#include <linux/types.h>
17#include <linux/kvm.h>
18#include <linux/kvm_types.h>
19#include <linux/threads.h>
20#include <linux/spinlock.h>
21
22
23#define KVM_MAX_VCPUS 1
24#define KVM_USER_MEM_SLOTS 8
25/* memory slots that does not exposed to userspace */
26#define KVM_PRIVATE_MEM_SLOTS 0
27
28#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
29
30/* Don't support huge pages */
31#define KVM_HPAGE_GFN_SHIFT(x) 0
32
33/* We don't currently support large pages. */
34#define KVM_NR_PAGE_SIZES 1
35#define KVM_PAGES_PER_HPAGE(x) 1
36
37
38
39/* Special address that contains the comm page, used for reducing # of traps */
40#define KVM_GUEST_COMMPAGE_ADDR 0x0
41
42#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
43 ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
44
45#define KVM_GUEST_KUSEG 0x00000000UL
46#define KVM_GUEST_KSEG0 0x40000000UL
47#define KVM_GUEST_KSEG23 0x60000000UL
48#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
49#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
50
51#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
52#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
53#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
54
55/*
56 * Map an address to a certain kernel segment
57 */
58#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
59#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
60#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
61
62#define KVM_INVALID_PAGE 0xdeadbeef
63#define KVM_INVALID_INST 0xdeadbeef
64#define KVM_INVALID_ADDR 0xdeadbeef
65
66#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL
67
68#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
69#define MS_TO_NS(x) (x * 1E6L)
70
71#define CAUSEB_DC 27
72#define CAUSEF_DC (_ULCAST_(1) << 27)
73
74struct kvm;
75struct kvm_run;
76struct kvm_vcpu;
77struct kvm_interrupt;
78
79extern atomic_t kvm_mips_instance;
80extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
81extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
82extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
83
84struct kvm_vm_stat {
85 u32 remote_tlb_flush;
86};
87
88struct kvm_vcpu_stat {
89 u32 wait_exits;
90 u32 cache_exits;
91 u32 signal_exits;
92 u32 int_exits;
93 u32 cop_unusable_exits;
94 u32 tlbmod_exits;
95 u32 tlbmiss_ld_exits;
96 u32 tlbmiss_st_exits;
97 u32 addrerr_st_exits;
98 u32 addrerr_ld_exits;
99 u32 syscall_exits;
100 u32 resvd_inst_exits;
101 u32 break_inst_exits;
102 u32 flush_dcache_exits;
103 u32 halt_wakeup;
104};
105
106enum kvm_mips_exit_types {
107 WAIT_EXITS,
108 CACHE_EXITS,
109 SIGNAL_EXITS,
110 INT_EXITS,
111 COP_UNUSABLE_EXITS,
112 TLBMOD_EXITS,
113 TLBMISS_LD_EXITS,
114 TLBMISS_ST_EXITS,
115 ADDRERR_ST_EXITS,
116 ADDRERR_LD_EXITS,
117 SYSCALL_EXITS,
118 RESVD_INST_EXITS,
119 BREAK_INST_EXITS,
120 FLUSH_DCACHE_EXITS,
121 MAX_KVM_MIPS_EXIT_TYPES
122};
123
124struct kvm_arch_memory_slot {
125};
126
127struct kvm_arch {
128 /* Guest GVA->HPA page table */
129 unsigned long *guest_pmap;
130 unsigned long guest_pmap_npages;
131
132 /* Wired host TLB used for the commpage */
133 int commpage_tlb;
134};
135
136#define N_MIPS_COPROC_REGS 32
137#define N_MIPS_COPROC_SEL 8
138
139struct mips_coproc {
140 unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
141#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
142 unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
143#endif
144};
145
146/*
147 * Coprocessor 0 register names
148 */
149#define MIPS_CP0_TLB_INDEX 0
150#define MIPS_CP0_TLB_RANDOM 1
151#define MIPS_CP0_TLB_LOW 2
152#define MIPS_CP0_TLB_LO0 2
153#define MIPS_CP0_TLB_LO1 3
154#define MIPS_CP0_TLB_CONTEXT 4
155#define MIPS_CP0_TLB_PG_MASK 5
156#define MIPS_CP0_TLB_WIRED 6
157#define MIPS_CP0_HWRENA 7
158#define MIPS_CP0_BAD_VADDR 8
159#define MIPS_CP0_COUNT 9
160#define MIPS_CP0_TLB_HI 10
161#define MIPS_CP0_COMPARE 11
162#define MIPS_CP0_STATUS 12
163#define MIPS_CP0_CAUSE 13
164#define MIPS_CP0_EXC_PC 14
165#define MIPS_CP0_PRID 15
166#define MIPS_CP0_CONFIG 16
167#define MIPS_CP0_LLADDR 17
168#define MIPS_CP0_WATCH_LO 18
169#define MIPS_CP0_WATCH_HI 19
170#define MIPS_CP0_TLB_XCONTEXT 20
171#define MIPS_CP0_ECC 26
172#define MIPS_CP0_CACHE_ERR 27
173#define MIPS_CP0_TAG_LO 28
174#define MIPS_CP0_TAG_HI 29
175#define MIPS_CP0_ERROR_PC 30
176#define MIPS_CP0_DEBUG 23
177#define MIPS_CP0_DEPC 24
178#define MIPS_CP0_PERFCNT 25
179#define MIPS_CP0_ERRCTL 26
180#define MIPS_CP0_DATA_LO 28
181#define MIPS_CP0_DATA_HI 29
182#define MIPS_CP0_DESAVE 31
183
184#define MIPS_CP0_CONFIG_SEL 0
185#define MIPS_CP0_CONFIG1_SEL 1
186#define MIPS_CP0_CONFIG2_SEL 2
187#define MIPS_CP0_CONFIG3_SEL 3
188
189/* Config0 register bits */
190#define CP0C0_M 31
191#define CP0C0_K23 28
192#define CP0C0_KU 25
193#define CP0C0_MDU 20
194#define CP0C0_MM 17
195#define CP0C0_BM 16
196#define CP0C0_BE 15
197#define CP0C0_AT 13
198#define CP0C0_AR 10
199#define CP0C0_MT 7
200#define CP0C0_VI 3
201#define CP0C0_K0 0
202
203/* Config1 register bits */
204#define CP0C1_M 31
205#define CP0C1_MMU 25
206#define CP0C1_IS 22
207#define CP0C1_IL 19
208#define CP0C1_IA 16
209#define CP0C1_DS 13
210#define CP0C1_DL 10
211#define CP0C1_DA 7
212#define CP0C1_C2 6
213#define CP0C1_MD 5
214#define CP0C1_PC 4
215#define CP0C1_WR 3
216#define CP0C1_CA 2
217#define CP0C1_EP 1
218#define CP0C1_FP 0
219
220/* Config2 Register bits */
221#define CP0C2_M 31
222#define CP0C2_TU 28
223#define CP0C2_TS 24
224#define CP0C2_TL 20
225#define CP0C2_TA 16
226#define CP0C2_SU 12
227#define CP0C2_SS 8
228#define CP0C2_SL 4
229#define CP0C2_SA 0
230
231/* Config3 Register bits */
232#define CP0C3_M 31
233#define CP0C3_ISA_ON_EXC 16
234#define CP0C3_ULRI 13
235#define CP0C3_DSPP 10
236#define CP0C3_LPA 7
237#define CP0C3_VEIC 6
238#define CP0C3_VInt 5
239#define CP0C3_SP 4
240#define CP0C3_MT 2
241#define CP0C3_SM 1
242#define CP0C3_TL 0
243
244/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
245#define MIPS_CONFIG0 \
246 ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
247
248/* Have config2, no coprocessor2 attached, no MDMX support attached,
249 no performance counters, watch registers present,
250 no code compression, EJTAG present, no FPU, no watch registers */
251#define MIPS_CONFIG1 \
252((1 << CP0C1_M) | \
253 (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
254 (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
255 (0 << CP0C1_FP))
256
257/* Have config3, no tertiary/secondary caches implemented */
258#define MIPS_CONFIG2 \
259((1 << CP0C2_M))
260
261/* No config4, no DSP ASE, no large physaddr (PABITS),
262 no external interrupt controller, no vectored interrupts,
263 no 1kb pages, no SmartMIPS ASE, no trace logic */
264#define MIPS_CONFIG3 \
265((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
266 (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
267 (0 << CP0C3_SM) | (0 << CP0C3_TL))
268
269/* MMU types, the first four entries have the same layout as the
270 CP0C0_MT field. */
271enum mips_mmu_types {
272 MMU_TYPE_NONE,
273 MMU_TYPE_R4000,
274 MMU_TYPE_RESERVED,
275 MMU_TYPE_FMT,
276 MMU_TYPE_R3000,
277 MMU_TYPE_R6000,
278 MMU_TYPE_R8000
279};
280
281/*
282 * Trap codes
283 */
284#define T_INT 0 /* Interrupt pending */
285#define T_TLB_MOD 1 /* TLB modified fault */
286#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
287#define T_TLB_ST_MISS 3 /* TLB miss on a store */
288#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
289#define T_ADDR_ERR_ST 5 /* Address error on a store */
290#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
291#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
292#define T_SYSCALL 8 /* System call */
293#define T_BREAK 9 /* Breakpoint */
294#define T_RES_INST 10 /* Reserved instruction exception */
295#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
296#define T_OVFLOW 12 /* Arithmetic overflow */
297
298/*
299 * Trap definitions added for r4000 port.
300 */
301#define T_TRAP 13 /* Trap instruction */
302#define T_VCEI 14 /* Virtual coherency exception */
303#define T_FPE 15 /* Floating point exception */
304#define T_WATCH 23 /* Watch address reference */
305#define T_VCED 31 /* Virtual coherency data */
306
307/* Resume Flags */
308#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
309#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
310
311#define RESUME_GUEST 0
312#define RESUME_GUEST_DR RESUME_FLAG_DR
313#define RESUME_HOST RESUME_FLAG_HOST
314
315enum emulation_result {
316 EMULATE_DONE, /* no further processing */
317 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
318 EMULATE_FAIL, /* can't emulate this instruction */
319 EMULATE_WAIT, /* WAIT instruction */
320 EMULATE_PRIV_FAIL,
321};
322
323#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
324#define MIPS3_PG_V 0x00000002 /* Valid */
325#define MIPS3_PG_NV 0x00000000
326#define MIPS3_PG_D 0x00000004 /* Dirty */
327
328#define mips3_paddr_to_tlbpfn(x) \
329 (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
330#define mips3_tlbpfn_to_paddr(x) \
331 ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
332
333#define MIPS3_PG_SHIFT 6
334#define MIPS3_PG_FRAME 0x3fffffc0
335
336#define VPN2_MASK 0xffffe000
337#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
338#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
339#define TLB_ASID(x) (ASID_MASK((x).tlb_hi))
340#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
341
342struct kvm_mips_tlb {
343 long tlb_mask;
344 long tlb_hi;
345 long tlb_lo0;
346 long tlb_lo1;
347};
348
349#define KVM_MIPS_GUEST_TLB_SIZE 64
350struct kvm_vcpu_arch {
351 void *host_ebase, *guest_ebase;
352 unsigned long host_stack;
353 unsigned long host_gp;
354
355 /* Host CP0 registers used when handling exits from guest */
356 unsigned long host_cp0_badvaddr;
357 unsigned long host_cp0_cause;
358 unsigned long host_cp0_epc;
359 unsigned long host_cp0_entryhi;
360 uint32_t guest_inst;
361
362 /* GPRS */
363 unsigned long gprs[32];
364 unsigned long hi;
365 unsigned long lo;
366 unsigned long pc;
367
368 /* FPU State */
369 struct mips_fpu_struct fpu;
370
371 /* COP0 State */
372 struct mips_coproc *cop0;
373
374 /* Host KSEG0 address of the EI/DI offset */
375 void *kseg0_commpage;
376
377 u32 io_gpr; /* GPR used as IO source/target */
378
379 /* Used to calibrate the virutal count register for the guest */
380 int32_t host_cp0_count;
381
382 /* Bitmask of exceptions that are pending */
383 unsigned long pending_exceptions;
384
385 /* Bitmask of pending exceptions to be cleared */
386 unsigned long pending_exceptions_clr;
387
388 unsigned long pending_load_cause;
389
390 /* Save/Restore the entryhi register when are are preempted/scheduled back in */
391 unsigned long preempt_entryhi;
392
393 /* S/W Based TLB for guest */
394 struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
395
396 /* Cached guest kernel/user ASIDs */
397 uint32_t guest_user_asid[NR_CPUS];
398 uint32_t guest_kernel_asid[NR_CPUS];
399 struct mm_struct guest_kernel_mm, guest_user_mm;
400
401 struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
402
403
404 struct hrtimer comparecount_timer;
405
406 int last_sched_cpu;
407
408 /* WAIT executed */
409 int wait;
410};
411
412
413#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
414#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
415#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
416#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
417#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
418#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
419#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
420#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
421#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
422#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
423#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
424#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
425#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
426#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
427#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
428#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
429#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
430#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
431#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
432#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
433#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
434#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
435#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
436#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
437#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
438#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
439#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
440#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
441#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
442#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
443#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
444#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
445#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
446#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
447#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
448#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
449#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
450#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
451#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
452#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
453#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
454#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
455#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
456
457#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
458#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
459#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
460#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
461#define kvm_change_c0_guest_cause(cop0, change, val) \
462{ \
463 kvm_clear_c0_guest_cause(cop0, change); \
464 kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
465}
466#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
467#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
468#define kvm_change_c0_guest_ebase(cop0, change, val) \
469{ \
470 kvm_clear_c0_guest_ebase(cop0, change); \
471 kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
472}
473
474
475struct kvm_mips_callbacks {
476 int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
477 int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
478 int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
479 int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
480 int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
481 int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
482 int (*handle_syscall) (struct kvm_vcpu *vcpu);
483 int (*handle_res_inst) (struct kvm_vcpu *vcpu);
484 int (*handle_break) (struct kvm_vcpu *vcpu);
485 int (*vm_init) (struct kvm *kvm);
486 int (*vcpu_init) (struct kvm_vcpu *vcpu);
487 int (*vcpu_setup) (struct kvm_vcpu *vcpu);
488 gpa_t(*gva_to_gpa) (gva_t gva);
489 void (*queue_timer_int) (struct kvm_vcpu *vcpu);
490 void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
491 void (*queue_io_int) (struct kvm_vcpu *vcpu,
492 struct kvm_mips_interrupt *irq);
493 void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
494 struct kvm_mips_interrupt *irq);
495 int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
496 uint32_t cause);
497 int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
498 uint32_t cause);
499 int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
500 struct kvm_regs *regs);
501 int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
502 struct kvm_regs *regs);
503};
504extern struct kvm_mips_callbacks *kvm_mips_callbacks;
505int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
506
507/* Debug: dump vcpu state */
508int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
509
510/* Trampoline ASM routine to start running in "Guest" context */
511extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
512
513/* TLB handling */
514uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
515
516uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
517
518uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
519
520extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
521 struct kvm_vcpu *vcpu);
522
523extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
524 struct kvm_vcpu *vcpu);
525
526extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
527 struct kvm_mips_tlb *tlb,
528 unsigned long *hpa0,
529 unsigned long *hpa1);
530
531extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
532 uint32_t *opc,
533 struct kvm_run *run,
534 struct kvm_vcpu *vcpu);
535
536extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
537 uint32_t *opc,
538 struct kvm_run *run,
539 struct kvm_vcpu *vcpu);
540
541extern void kvm_mips_dump_host_tlbs(void);
542extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
543extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
544extern void kvm_mips_flush_host_tlb(int skip_kseg0);
545extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
546extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
547
548extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
549 unsigned long entryhi);
550extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
551extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
552 unsigned long gva);
553extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
554 struct kvm_vcpu *vcpu);
555extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
556extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
557extern void kvm_local_flush_tlb_all(void);
558extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
559extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
560extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
561extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
562
563/* Emulation */
564uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
565enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
566
567extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
568 uint32_t *opc,
569 struct kvm_run *run,
570 struct kvm_vcpu *vcpu);
571
572extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
573 uint32_t *opc,
574 struct kvm_run *run,
575 struct kvm_vcpu *vcpu);
576
577extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
578 uint32_t *opc,
579 struct kvm_run *run,
580 struct kvm_vcpu *vcpu);
581
582extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
583 uint32_t *opc,
584 struct kvm_run *run,
585 struct kvm_vcpu *vcpu);
586
587extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
588 uint32_t *opc,
589 struct kvm_run *run,
590 struct kvm_vcpu *vcpu);
591
592extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
593 uint32_t *opc,
594 struct kvm_run *run,
595 struct kvm_vcpu *vcpu);
596
597extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
598 uint32_t *opc,
599 struct kvm_run *run,
600 struct kvm_vcpu *vcpu);
601
602extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
603 uint32_t *opc,
604 struct kvm_run *run,
605 struct kvm_vcpu *vcpu);
606
607extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
608 uint32_t *opc,
609 struct kvm_run *run,
610 struct kvm_vcpu *vcpu);
611
612extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
613 uint32_t *opc,
614 struct kvm_run *run,
615 struct kvm_vcpu *vcpu);
616
617extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
618 uint32_t *opc,
619 struct kvm_run *run,
620 struct kvm_vcpu *vcpu);
621
622extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
623 struct kvm_run *run);
624
625enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
626
627enum emulation_result kvm_mips_check_privilege(unsigned long cause,
628 uint32_t *opc,
629 struct kvm_run *run,
630 struct kvm_vcpu *vcpu);
631
632enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
633 uint32_t *opc,
634 uint32_t cause,
635 struct kvm_run *run,
636 struct kvm_vcpu *vcpu);
637enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
638 uint32_t *opc,
639 uint32_t cause,
640 struct kvm_run *run,
641 struct kvm_vcpu *vcpu);
642enum emulation_result kvm_mips_emulate_store(uint32_t inst,
643 uint32_t cause,
644 struct kvm_run *run,
645 struct kvm_vcpu *vcpu);
646enum emulation_result kvm_mips_emulate_load(uint32_t inst,
647 uint32_t cause,
648 struct kvm_run *run,
649 struct kvm_vcpu *vcpu);
650
651/* Dynamic binary translation */
652extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
653 struct kvm_vcpu *vcpu);
654extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
655 struct kvm_vcpu *vcpu);
656extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
657 struct kvm_vcpu *vcpu);
658extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
659 struct kvm_vcpu *vcpu);
660
661/* Misc */
662extern void mips32_SyncICache(unsigned long addr, unsigned long size);
663extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
664extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
665
666
667#endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
deleted file mode 100644
index 8fcf8df4418a..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef BCM63XX_CLK_H_
2#define BCM63XX_CLK_H_
3
4struct clk {
5 void (*set)(struct clk *, int);
6 unsigned int rate;
7 unsigned int usage;
8 int id;
9};
10
11#endif /* ! BCM63XX_CLK_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index cb922b9cb0e9..336228990808 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -14,11 +14,12 @@
14#define BCM6345_CPU_ID 0x6345 14#define BCM6345_CPU_ID 0x6345
15#define BCM6348_CPU_ID 0x6348 15#define BCM6348_CPU_ID 0x6348
16#define BCM6358_CPU_ID 0x6358 16#define BCM6358_CPU_ID 0x6358
17#define BCM6362_CPU_ID 0x6362
17#define BCM6368_CPU_ID 0x6368 18#define BCM6368_CPU_ID 0x6368
18 19
19void __init bcm63xx_cpu_init(void); 20void __init bcm63xx_cpu_init(void);
20u16 __bcm63xx_get_cpu_id(void); 21u16 __bcm63xx_get_cpu_id(void);
21u16 bcm63xx_get_cpu_rev(void); 22u8 bcm63xx_get_cpu_rev(void);
22unsigned int bcm63xx_get_cpu_freq(void); 23unsigned int bcm63xx_get_cpu_freq(void);
23 24
24#ifdef CONFIG_BCM63XX_CPU_6328 25#ifdef CONFIG_BCM63XX_CPU_6328
@@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void);
86# define BCMCPU_IS_6358() (0) 87# define BCMCPU_IS_6358() (0)
87#endif 88#endif
88 89
90#ifdef CONFIG_BCM63XX_CPU_6362
91# ifdef bcm63xx_get_cpu_id
92# undef bcm63xx_get_cpu_id
93# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id()
94# define BCMCPU_RUNTIME_DETECT
95# else
96# define bcm63xx_get_cpu_id() BCM6362_CPU_ID
97# endif
98# define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
99#else
100# define BCMCPU_IS_6362() (0)
101#endif
102
103
89#ifdef CONFIG_BCM63XX_CPU_6368 104#ifdef CONFIG_BCM63XX_CPU_6368
90# ifdef bcm63xx_get_cpu_id 105# ifdef bcm63xx_get_cpu_id
91# undef bcm63xx_get_cpu_id 106# undef bcm63xx_get_cpu_id
@@ -406,6 +421,62 @@ enum bcm63xx_regs_set {
406 421
407 422
408/* 423/*
424 * 6362 register sets base address
425 */
426#define BCM_6362_DSL_LMEM_BASE (0xdeadbeef)
427#define BCM_6362_PERF_BASE (0xb0000000)
428#define BCM_6362_TIMER_BASE (0xb0000040)
429#define BCM_6362_WDT_BASE (0xb000005c)
430#define BCM_6362_UART0_BASE (0xb0000100)
431#define BCM_6362_UART1_BASE (0xb0000120)
432#define BCM_6362_GPIO_BASE (0xb0000080)
433#define BCM_6362_SPI_BASE (0xb0000800)
434#define BCM_6362_HSSPI_BASE (0xb0001000)
435#define BCM_6362_UDC0_BASE (0xdeadbeef)
436#define BCM_6362_USBDMA_BASE (0xb000c000)
437#define BCM_6362_OHCI0_BASE (0xb0002600)
438#define BCM_6362_OHCI_PRIV_BASE (0xdeadbeef)
439#define BCM_6362_USBH_PRIV_BASE (0xb0002700)
440#define BCM_6362_USBD_BASE (0xb0002400)
441#define BCM_6362_MPI_BASE (0xdeadbeef)
442#define BCM_6362_PCMCIA_BASE (0xdeadbeef)
443#define BCM_6362_PCIE_BASE (0xb0e40000)
444#define BCM_6362_SDRAM_REGS_BASE (0xdeadbeef)
445#define BCM_6362_DSL_BASE (0xdeadbeef)
446#define BCM_6362_UBUS_BASE (0xdeadbeef)
447#define BCM_6362_ENET0_BASE (0xdeadbeef)
448#define BCM_6362_ENET1_BASE (0xdeadbeef)
449#define BCM_6362_ENETDMA_BASE (0xb000d800)
450#define BCM_6362_ENETDMAC_BASE (0xb000da00)
451#define BCM_6362_ENETDMAS_BASE (0xb000dc00)
452#define BCM_6362_ENETSW_BASE (0xb0e00000)
453#define BCM_6362_EHCI0_BASE (0xb0002500)
454#define BCM_6362_SDRAM_BASE (0xdeadbeef)
455#define BCM_6362_MEMC_BASE (0xdeadbeef)
456#define BCM_6362_DDR_BASE (0xb0003000)
457#define BCM_6362_M2M_BASE (0xdeadbeef)
458#define BCM_6362_ATM_BASE (0xdeadbeef)
459#define BCM_6362_XTM_BASE (0xb0007800)
460#define BCM_6362_XTMDMA_BASE (0xb000b800)
461#define BCM_6362_XTMDMAC_BASE (0xdeadbeef)
462#define BCM_6362_XTMDMAS_BASE (0xdeadbeef)
463#define BCM_6362_PCM_BASE (0xb000a800)
464#define BCM_6362_PCMDMA_BASE (0xdeadbeef)
465#define BCM_6362_PCMDMAC_BASE (0xdeadbeef)
466#define BCM_6362_PCMDMAS_BASE (0xdeadbeef)
467#define BCM_6362_RNG_BASE (0xdeadbeef)
468#define BCM_6362_MISC_BASE (0xb0001800)
469
470#define BCM_6362_NAND_REG_BASE (0xb0000200)
471#define BCM_6362_NAND_CACHE_BASE (0xb0000600)
472#define BCM_6362_LED_BASE (0xb0001900)
473#define BCM_6362_IPSEC_BASE (0xb0002800)
474#define BCM_6362_IPSEC_DMA_BASE (0xb000d000)
475#define BCM_6362_WLAN_CHIPCOMMON_BASE (0xb0004000)
476#define BCM_6362_WLAN_D11_BASE (0xb0005000)
477#define BCM_6362_WLAN_SHIM_BASE (0xb0007000)
478
479/*
409 * 6368 register sets base address 480 * 6368 register sets base address
410 */ 481 */
411#define BCM_6368_DSL_LMEM_BASE (0xdeadbeef) 482#define BCM_6368_DSL_LMEM_BASE (0xdeadbeef)
@@ -564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
564#ifdef CONFIG_BCM63XX_CPU_6358 635#ifdef CONFIG_BCM63XX_CPU_6358
565 __GEN_RSET(6358) 636 __GEN_RSET(6358)
566#endif 637#endif
638#ifdef CONFIG_BCM63XX_CPU_6362
639 __GEN_RSET(6362)
640#endif
567#ifdef CONFIG_BCM63XX_CPU_6368 641#ifdef CONFIG_BCM63XX_CPU_6368
568 __GEN_RSET(6368) 642 __GEN_RSET(6368)
569#endif 643#endif
@@ -820,6 +894,71 @@ enum bcm63xx_irq {
820#define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28) 894#define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28)
821 895
822/* 896/*
897 * 6362 irqs
898 */
899#define BCM_6362_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
900
901#define BCM_6362_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
902#define BCM_6362_SPI_IRQ (IRQ_INTERNAL_BASE + 2)
903#define BCM_6362_UART0_IRQ (IRQ_INTERNAL_BASE + 3)
904#define BCM_6362_UART1_IRQ (IRQ_INTERNAL_BASE + 4)
905#define BCM_6362_DSL_IRQ (IRQ_INTERNAL_BASE + 28)
906#define BCM_6362_UDC0_IRQ 0
907#define BCM_6362_ENET0_IRQ 0
908#define BCM_6362_ENET1_IRQ 0
909#define BCM_6362_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 14)
910#define BCM_6362_HSSPI_IRQ (IRQ_INTERNAL_BASE + 5)
911#define BCM_6362_OHCI0_IRQ (IRQ_INTERNAL_BASE + 9)
912#define BCM_6362_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10)
913#define BCM_6362_USBD_IRQ (IRQ_INTERNAL_BASE + 11)
914#define BCM_6362_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 20)
915#define BCM_6362_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 21)
916#define BCM_6362_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 22)
917#define BCM_6362_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 23)
918#define BCM_6362_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 24)
919#define BCM_6362_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 25)
920#define BCM_6362_PCMCIA_IRQ 0
921#define BCM_6362_ENET0_RXDMA_IRQ 0
922#define BCM_6362_ENET0_TXDMA_IRQ 0
923#define BCM_6362_ENET1_RXDMA_IRQ 0
924#define BCM_6362_ENET1_TXDMA_IRQ 0
925#define BCM_6362_PCI_IRQ (IRQ_INTERNAL_BASE + 30)
926#define BCM_6362_ATM_IRQ 0
927#define BCM_6362_ENETSW_RXDMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 0)
928#define BCM_6362_ENETSW_RXDMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 1)
929#define BCM_6362_ENETSW_RXDMA2_IRQ (BCM_6362_HIGH_IRQ_BASE + 2)
930#define BCM_6362_ENETSW_RXDMA3_IRQ (BCM_6362_HIGH_IRQ_BASE + 3)
931#define BCM_6362_ENETSW_TXDMA0_IRQ 0
932#define BCM_6362_ENETSW_TXDMA1_IRQ 0
933#define BCM_6362_ENETSW_TXDMA2_IRQ 0
934#define BCM_6362_ENETSW_TXDMA3_IRQ 0
935#define BCM_6362_XTM_IRQ 0
936#define BCM_6362_XTM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 12)
937
938#define BCM_6362_RING_OSC_IRQ (IRQ_INTERNAL_BASE + 1)
939#define BCM_6362_WLAN_GPIO_IRQ (IRQ_INTERNAL_BASE + 6)
940#define BCM_6362_WLAN_IRQ (IRQ_INTERNAL_BASE + 7)
941#define BCM_6362_IPSEC_IRQ (IRQ_INTERNAL_BASE + 8)
942#define BCM_6362_NAND_IRQ (IRQ_INTERNAL_BASE + 12)
943#define BCM_6362_PCM_IRQ (IRQ_INTERNAL_BASE + 13)
944#define BCM_6362_DG_IRQ (IRQ_INTERNAL_BASE + 15)
945#define BCM_6362_EPHY_ENERGY0_IRQ (IRQ_INTERNAL_BASE + 16)
946#define BCM_6362_EPHY_ENERGY1_IRQ (IRQ_INTERNAL_BASE + 17)
947#define BCM_6362_EPHY_ENERGY2_IRQ (IRQ_INTERNAL_BASE + 18)
948#define BCM_6362_EPHY_ENERGY3_IRQ (IRQ_INTERNAL_BASE + 19)
949#define BCM_6362_IPSEC_DMA0_IRQ (IRQ_INTERNAL_BASE + 26)
950#define BCM_6362_IPSEC_DMA1_IRQ (IRQ_INTERNAL_BASE + 27)
951#define BCM_6362_FAP0_IRQ (IRQ_INTERNAL_BASE + 29)
952#define BCM_6362_PCM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 4)
953#define BCM_6362_PCM_DMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 5)
954#define BCM_6362_DECT0_IRQ (BCM_6362_HIGH_IRQ_BASE + 6)
955#define BCM_6362_DECT1_IRQ (BCM_6362_HIGH_IRQ_BASE + 7)
956#define BCM_6362_EXT_IRQ0 (BCM_6362_HIGH_IRQ_BASE + 8)
957#define BCM_6362_EXT_IRQ1 (BCM_6362_HIGH_IRQ_BASE + 9)
958#define BCM_6362_EXT_IRQ2 (BCM_6362_HIGH_IRQ_BASE + 10)
959#define BCM_6362_EXT_IRQ3 (BCM_6362_HIGH_IRQ_BASE + 11)
960
961/*
823 * 6368 irqs 962 * 6368 irqs
824 */ 963 */
825#define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32) 964#define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index b0184cf02575..c426cabc620a 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
71 71
72 return bcm63xx_regs_spi[reg]; 72 return bcm63xx_regs_spi[reg];
73#else 73#else
74#ifdef CONFIG_BCM63XX_CPU_6338 74#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
75 __GEN_SPI_RSET(6338)
76#endif
77#ifdef CONFIG_BCM63XX_CPU_6348
78 __GEN_SPI_RSET(6348) 75 __GEN_SPI_RSET(6348)
79#endif 76#endif
80#ifdef CONFIG_BCM63XX_CPU_6358 77#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
78 defined(CONFIG_BCM63XX_CPU_6368)
81 __GEN_SPI_RSET(6358) 79 __GEN_SPI_RSET(6358)
82#endif 80#endif
83#ifdef CONFIG_BCM63XX_CPU_6368
84 __GEN_SPI_RSET(6368)
85#endif
86#endif 81#endif
87 return 0; 82 return 0;
88} 83}
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 0a9891f7580d..35baa1a60a64 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void)
17 return 8; 17 return 8;
18 case BCM6345_CPU_ID: 18 case BCM6345_CPU_ID:
19 return 16; 19 return 16;
20 case BCM6362_CPU_ID:
21 return 48;
20 case BCM6368_CPU_ID: 22 case BCM6368_CPU_ID:
21 return 38; 23 return 38;
22 case BCM6348_CPU_ID: 24 case BCM6348_CPU_ID:
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 81b4702f792a..3203fe49b34d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -10,7 +10,7 @@
10#define REV_CHIPID_SHIFT 16 10#define REV_CHIPID_SHIFT 16
11#define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT) 11#define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT)
12#define REV_REVID_SHIFT 0 12#define REV_REVID_SHIFT 0
13#define REV_REVID_MASK (0xffff << REV_REVID_SHIFT) 13#define REV_REVID_MASK (0xff << REV_REVID_SHIFT)
14 14
15/* Clock Control register */ 15/* Clock Control register */
16#define PERF_CKCTL_REG 0x4 16#define PERF_CKCTL_REG 0x4
@@ -112,6 +112,39 @@
112 CKCTL_6358_USBSU_EN | \ 112 CKCTL_6358_USBSU_EN | \
113 CKCTL_6358_EPHY_EN) 113 CKCTL_6358_EPHY_EN)
114 114
115#define CKCTL_6362_ADSL_QPROC_EN (1 << 1)
116#define CKCTL_6362_ADSL_AFE_EN (1 << 2)
117#define CKCTL_6362_ADSL_EN (1 << 3)
118#define CKCTL_6362_MIPS_EN (1 << 4)
119#define CKCTL_6362_WLAN_OCP_EN (1 << 5)
120#define CKCTL_6362_SWPKT_USB_EN (1 << 7)
121#define CKCTL_6362_SWPKT_SAR_EN (1 << 8)
122#define CKCTL_6362_SAR_EN (1 << 9)
123#define CKCTL_6362_ROBOSW_EN (1 << 10)
124#define CKCTL_6362_PCM_EN (1 << 11)
125#define CKCTL_6362_USBD_EN (1 << 12)
126#define CKCTL_6362_USBH_EN (1 << 13)
127#define CKCTL_6362_IPSEC_EN (1 << 14)
128#define CKCTL_6362_SPI_EN (1 << 15)
129#define CKCTL_6362_HSSPI_EN (1 << 16)
130#define CKCTL_6362_PCIE_EN (1 << 17)
131#define CKCTL_6362_FAP_EN (1 << 18)
132#define CKCTL_6362_PHYMIPS_EN (1 << 19)
133#define CKCTL_6362_NAND_EN (1 << 20)
134
135#define CKCTL_6362_ALL_SAFE_EN (CKCTL_6362_PHYMIPS_EN | \
136 CKCTL_6362_ADSL_QPROC_EN | \
137 CKCTL_6362_ADSL_AFE_EN | \
138 CKCTL_6362_ADSL_EN | \
139 CKCTL_6362_SAR_EN | \
140 CKCTL_6362_PCM_EN | \
141 CKCTL_6362_IPSEC_EN | \
142 CKCTL_6362_USBD_EN | \
143 CKCTL_6362_USBH_EN | \
144 CKCTL_6362_ROBOSW_EN | \
145 CKCTL_6362_PCIE_EN)
146
147
115#define CKCTL_6368_VDSL_QPROC_EN (1 << 2) 148#define CKCTL_6368_VDSL_QPROC_EN (1 << 2)
116#define CKCTL_6368_VDSL_AFE_EN (1 << 3) 149#define CKCTL_6368_VDSL_AFE_EN (1 << 3)
117#define CKCTL_6368_VDSL_BONDING_EN (1 << 4) 150#define CKCTL_6368_VDSL_BONDING_EN (1 << 4)
@@ -153,6 +186,7 @@
153#define PERF_IRQMASK_6345_REG 0xc 186#define PERF_IRQMASK_6345_REG 0xc
154#define PERF_IRQMASK_6348_REG 0xc 187#define PERF_IRQMASK_6348_REG 0xc
155#define PERF_IRQMASK_6358_REG 0xc 188#define PERF_IRQMASK_6358_REG 0xc
189#define PERF_IRQMASK_6362_REG 0x20
156#define PERF_IRQMASK_6368_REG 0x20 190#define PERF_IRQMASK_6368_REG 0x20
157 191
158/* Interrupt Status register */ 192/* Interrupt Status register */
@@ -161,6 +195,7 @@
161#define PERF_IRQSTAT_6345_REG 0x10 195#define PERF_IRQSTAT_6345_REG 0x10
162#define PERF_IRQSTAT_6348_REG 0x10 196#define PERF_IRQSTAT_6348_REG 0x10
163#define PERF_IRQSTAT_6358_REG 0x10 197#define PERF_IRQSTAT_6358_REG 0x10
198#define PERF_IRQSTAT_6362_REG 0x28
164#define PERF_IRQSTAT_6368_REG 0x28 199#define PERF_IRQSTAT_6368_REG 0x28
165 200
166/* External Interrupt Configuration register */ 201/* External Interrupt Configuration register */
@@ -169,6 +204,7 @@
169#define PERF_EXTIRQ_CFG_REG_6345 0x14 204#define PERF_EXTIRQ_CFG_REG_6345 0x14
170#define PERF_EXTIRQ_CFG_REG_6348 0x14 205#define PERF_EXTIRQ_CFG_REG_6348 0x14
171#define PERF_EXTIRQ_CFG_REG_6358 0x14 206#define PERF_EXTIRQ_CFG_REG_6358 0x14
207#define PERF_EXTIRQ_CFG_REG_6362 0x18
172#define PERF_EXTIRQ_CFG_REG_6368 0x18 208#define PERF_EXTIRQ_CFG_REG_6368 0x18
173 209
174#define PERF_EXTIRQ_CFG_REG2_6368 0x1c 210#define PERF_EXTIRQ_CFG_REG2_6368 0x1c
@@ -197,6 +233,7 @@
197#define PERF_SOFTRESET_REG 0x28 233#define PERF_SOFTRESET_REG 0x28
198#define PERF_SOFTRESET_6328_REG 0x10 234#define PERF_SOFTRESET_6328_REG 0x10
199#define PERF_SOFTRESET_6358_REG 0x34 235#define PERF_SOFTRESET_6358_REG 0x34
236#define PERF_SOFTRESET_6362_REG 0x10
200#define PERF_SOFTRESET_6368_REG 0x10 237#define PERF_SOFTRESET_6368_REG 0x10
201 238
202#define SOFTRESET_6328_SPI_MASK (1 << 0) 239#define SOFTRESET_6328_SPI_MASK (1 << 0)
@@ -259,6 +296,22 @@
259#define SOFTRESET_6358_PCM_MASK (1 << 13) 296#define SOFTRESET_6358_PCM_MASK (1 << 13)
260#define SOFTRESET_6358_ADSL_MASK (1 << 14) 297#define SOFTRESET_6358_ADSL_MASK (1 << 14)
261 298
299#define SOFTRESET_6362_SPI_MASK (1 << 0)
300#define SOFTRESET_6362_IPSEC_MASK (1 << 1)
301#define SOFTRESET_6362_EPHY_MASK (1 << 2)
302#define SOFTRESET_6362_SAR_MASK (1 << 3)
303#define SOFTRESET_6362_ENETSW_MASK (1 << 4)
304#define SOFTRESET_6362_USBS_MASK (1 << 5)
305#define SOFTRESET_6362_USBH_MASK (1 << 6)
306#define SOFTRESET_6362_PCM_MASK (1 << 7)
307#define SOFTRESET_6362_PCIE_CORE_MASK (1 << 8)
308#define SOFTRESET_6362_PCIE_MASK (1 << 9)
309#define SOFTRESET_6362_PCIE_EXT_MASK (1 << 10)
310#define SOFTRESET_6362_WLAN_SHIM_MASK (1 << 11)
311#define SOFTRESET_6362_DDR_PHY_MASK (1 << 12)
312#define SOFTRESET_6362_FAP_MASK (1 << 13)
313#define SOFTRESET_6362_WLAN_UBUS_MASK (1 << 14)
314
262#define SOFTRESET_6368_SPI_MASK (1 << 0) 315#define SOFTRESET_6368_SPI_MASK (1 << 0)
263#define SOFTRESET_6368_MPI_MASK (1 << 3) 316#define SOFTRESET_6368_MPI_MASK (1 << 3)
264#define SOFTRESET_6368_EPHY_MASK (1 << 6) 317#define SOFTRESET_6368_EPHY_MASK (1 << 6)
@@ -1223,24 +1276,7 @@
1223 * _REG relative to RSET_SPI 1276 * _REG relative to RSET_SPI
1224 *************************************************************************/ 1277 *************************************************************************/
1225 1278
1226/* BCM 6338 SPI core */ 1279/* BCM 6338/6348 SPI core */
1227#define SPI_6338_CMD 0x00 /* 16-bits register */
1228#define SPI_6338_INT_STATUS 0x02
1229#define SPI_6338_INT_MASK_ST 0x03
1230#define SPI_6338_INT_MASK 0x04
1231#define SPI_6338_ST 0x05
1232#define SPI_6338_CLK_CFG 0x06
1233#define SPI_6338_FILL_BYTE 0x07
1234#define SPI_6338_MSG_TAIL 0x09
1235#define SPI_6338_RX_TAIL 0x0b
1236#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */
1237#define SPI_6338_MSG_CTL_WIDTH 8
1238#define SPI_6338_MSG_DATA 0x41
1239#define SPI_6338_MSG_DATA_SIZE 0x3f
1240#define SPI_6338_RX_DATA 0x80
1241#define SPI_6338_RX_DATA_SIZE 0x3f
1242
1243/* BCM 6348 SPI core */
1244#define SPI_6348_CMD 0x00 /* 16-bits register */ 1280#define SPI_6348_CMD 0x00 /* 16-bits register */
1245#define SPI_6348_INT_STATUS 0x02 1281#define SPI_6348_INT_STATUS 0x02
1246#define SPI_6348_INT_MASK_ST 0x03 1282#define SPI_6348_INT_MASK_ST 0x03
@@ -1257,7 +1293,7 @@
1257#define SPI_6348_RX_DATA 0x80 1293#define SPI_6348_RX_DATA 0x80
1258#define SPI_6348_RX_DATA_SIZE 0x3f 1294#define SPI_6348_RX_DATA_SIZE 0x3f
1259 1295
1260/* BCM 6358 SPI core */ 1296/* BCM 6358/6262/6368 SPI core */
1261#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ 1297#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */
1262#define SPI_6358_MSG_CTL_WIDTH 16 1298#define SPI_6358_MSG_CTL_WIDTH 16
1263#define SPI_6358_MSG_DATA 0x02 1299#define SPI_6358_MSG_DATA 0x02
@@ -1274,23 +1310,6 @@
1274#define SPI_6358_MSG_TAIL 0x709 1310#define SPI_6358_MSG_TAIL 0x709
1275#define SPI_6358_RX_TAIL 0x70B 1311#define SPI_6358_RX_TAIL 0x70B
1276 1312
1277/* BCM 6358 SPI core */
1278#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */
1279#define SPI_6368_MSG_CTL_WIDTH 16
1280#define SPI_6368_MSG_DATA 0x02
1281#define SPI_6368_MSG_DATA_SIZE 0x21e
1282#define SPI_6368_RX_DATA 0x400
1283#define SPI_6368_RX_DATA_SIZE 0x220
1284#define SPI_6368_CMD 0x700 /* 16-bits register */
1285#define SPI_6368_INT_STATUS 0x702
1286#define SPI_6368_INT_MASK_ST 0x703
1287#define SPI_6368_INT_MASK 0x704
1288#define SPI_6368_ST 0x705
1289#define SPI_6368_CLK_CFG 0x706
1290#define SPI_6368_FILL_BYTE 0x707
1291#define SPI_6368_MSG_TAIL 0x709
1292#define SPI_6368_RX_TAIL 0x70B
1293
1294/* Shared SPI definitions */ 1313/* Shared SPI definitions */
1295 1314
1296/* Message configuration */ 1315/* Message configuration */
@@ -1298,10 +1317,8 @@
1298#define SPI_HD_W 0x01 1317#define SPI_HD_W 0x01
1299#define SPI_HD_R 0x02 1318#define SPI_HD_R 0x02
1300#define SPI_BYTE_CNT_SHIFT 0 1319#define SPI_BYTE_CNT_SHIFT 0
1301#define SPI_6338_MSG_TYPE_SHIFT 6
1302#define SPI_6348_MSG_TYPE_SHIFT 6 1320#define SPI_6348_MSG_TYPE_SHIFT 6
1303#define SPI_6358_MSG_TYPE_SHIFT 14 1321#define SPI_6358_MSG_TYPE_SHIFT 14
1304#define SPI_6368_MSG_TYPE_SHIFT 14
1305 1322
1306/* Command */ 1323/* Command */
1307#define SPI_CMD_NOOP 0x00 1324#define SPI_CMD_NOOP 0x00
@@ -1348,10 +1365,18 @@
1348/************************************************************************* 1365/*************************************************************************
1349 * _REG relative to RSET_MISC 1366 * _REG relative to RSET_MISC
1350 *************************************************************************/ 1367 *************************************************************************/
1351#define MISC_SERDES_CTRL_REG 0x0 1368#define MISC_SERDES_CTRL_6328_REG 0x0
1369#define MISC_SERDES_CTRL_6362_REG 0x4
1352#define SERDES_PCIE_EN (1 << 0) 1370#define SERDES_PCIE_EN (1 << 0)
1353#define SERDES_PCIE_EXD_EN (1 << 15) 1371#define SERDES_PCIE_EXD_EN (1 << 15)
1354 1372
1373#define MISC_STRAPBUS_6362_REG 0x14
1374#define STRAPBUS_6362_FCVO_SHIFT 1
1375#define STRAPBUS_6362_HSSPI_CLK_FAST (1 << 13)
1376#define STRAPBUS_6362_FCVO_MASK (0x1f << STRAPBUS_6362_FCVO_SHIFT)
1377#define STRAPBUS_6362_BOOT_SEL_SERIAL (1 << 15)
1378#define STRAPBUS_6362_BOOT_SEL_NAND (0 << 15)
1379
1355#define MISC_STRAPBUS_6328_REG 0x240 1380#define MISC_STRAPBUS_6328_REG 0x240
1356#define STRAPBUS_6328_FCVO_SHIFT 7 1381#define STRAPBUS_6328_FCVO_SHIFT 7
1357#define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT) 1382#define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT)
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
index 30931c42379d..94e3011ba7df 100644
--- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
19 return 1; 19 return 1;
20 break; 20 break;
21 case BCM6328_CPU_ID: 21 case BCM6328_CPU_ID:
22 case BCM6362_CPU_ID:
22 case BCM6368_CPU_ID: 23 case BCM6368_CPU_ID:
23 if (offset >= 0xb0000000 && offset < 0xb1000000) 24 if (offset >= 0xb0000000 && offset < 0xb1000000)
24 return 1; 25 return 1;
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 9c95177f7a7e..fe23034aaf72 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev)
61{ 61{
62#ifdef CONFIG_DMA_COHERENT 62#ifdef CONFIG_DMA_COHERENT
63 return 1; 63 return 1;
64#endif 64#else
65#ifdef CONFIG_DMA_NONCOHERENT 65 return coherentio;
66 return 0;
67#endif 66#endif
68} 67}
69 68
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 73d717a75cb0..5b2f2e68e57f 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -20,14 +20,21 @@
20#endif 20#endif
21 21
22#ifdef CONFIG_32BIT 22#ifdef CONFIG_32BIT
23 23#ifdef CONFIG_KVM_GUEST
24#define CAC_BASE _AC(0x40000000, UL)
25#else
24#define CAC_BASE _AC(0x80000000, UL) 26#define CAC_BASE _AC(0x80000000, UL)
27#endif
25#define IO_BASE _AC(0xa0000000, UL) 28#define IO_BASE _AC(0xa0000000, UL)
26#define UNCAC_BASE _AC(0xa0000000, UL) 29#define UNCAC_BASE _AC(0xa0000000, UL)
27 30
28#ifndef MAP_BASE 31#ifndef MAP_BASE
32#ifdef CONFIG_KVM_GUEST
33#define MAP_BASE _AC(0x60000000, UL)
34#else
29#define MAP_BASE _AC(0xc0000000, UL) 35#define MAP_BASE _AC(0xc0000000, UL)
30#endif 36#endif
37#endif
31 38
32/* 39/*
33 * Memory above this physical address will be considered highmem. 40 * Memory above this physical address will be considered highmem.
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 75fd8c0f986e..c0f3ef45c2c1 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -57,5 +57,6 @@
57#define cpu_has_vint 0 57#define cpu_has_vint 0
58#define cpu_has_vtag_icache 0 58#define cpu_has_vtag_icache 0
59#define cpu_has_watch 1 59#define cpu_has_watch 1
60#define cpu_has_local_ebase 0
60 61
61#endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */ 62#endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644
index 000000000000..9809972ea882
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -0,0 +1,84 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Parts of this file are based on Ralink's 2.6.21 BSP
7 *
8 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
9 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
10 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
11 */
12
13#ifndef _MT7620_REGS_H_
14#define _MT7620_REGS_H_
15
16#define MT7620_SYSC_BASE 0x10000000
17
18#define SYSC_REG_CHIP_NAME0 0x00
19#define SYSC_REG_CHIP_NAME1 0x04
20#define SYSC_REG_CHIP_REV 0x0c
21#define SYSC_REG_SYSTEM_CONFIG0 0x10
22#define SYSC_REG_SYSTEM_CONFIG1 0x14
23#define SYSC_REG_CPLL_CONFIG0 0x54
24#define SYSC_REG_CPLL_CONFIG1 0x58
25
26#define MT7620N_CHIP_NAME0 0x33365452
27#define MT7620N_CHIP_NAME1 0x20203235
28
29#define MT7620A_CHIP_NAME0 0x3637544d
30#define MT7620A_CHIP_NAME1 0x20203032
31
32#define CHIP_REV_PKG_MASK 0x1
33#define CHIP_REV_PKG_SHIFT 16
34#define CHIP_REV_VER_MASK 0xf
35#define CHIP_REV_VER_SHIFT 8
36#define CHIP_REV_ECO_MASK 0xf
37
38#define CPLL_SW_CONFIG_SHIFT 31
39#define CPLL_SW_CONFIG_MASK 0x1
40#define CPLL_CPU_CLK_SHIFT 24
41#define CPLL_CPU_CLK_MASK 0x1
42#define CPLL_MULT_RATIO_SHIFT 16
43#define CPLL_MULT_RATIO 0x7
44#define CPLL_DIV_RATIO_SHIFT 10
45#define CPLL_DIV_RATIO 0x3
46
47#define SYSCFG0_DRAM_TYPE_MASK 0x3
48#define SYSCFG0_DRAM_TYPE_SHIFT 4
49#define SYSCFG0_DRAM_TYPE_SDRAM 0
50#define SYSCFG0_DRAM_TYPE_DDR1 1
51#define SYSCFG0_DRAM_TYPE_DDR2 2
52
53#define MT7620_DRAM_BASE 0x0
54#define MT7620_SDRAM_SIZE_MIN 2
55#define MT7620_SDRAM_SIZE_MAX 64
56#define MT7620_DDR1_SIZE_MIN 32
57#define MT7620_DDR1_SIZE_MAX 128
58#define MT7620_DDR2_SIZE_MIN 32
59#define MT7620_DDR2_SIZE_MAX 256
60
61#define MT7620_GPIO_MODE_I2C BIT(0)
62#define MT7620_GPIO_MODE_UART0_SHIFT 2
63#define MT7620_GPIO_MODE_UART0_MASK 0x7
64#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
65#define MT7620_GPIO_MODE_UARTF 0x0
66#define MT7620_GPIO_MODE_PCM_UARTF 0x1
67#define MT7620_GPIO_MODE_PCM_I2S 0x2
68#define MT7620_GPIO_MODE_I2S_UARTF 0x3
69#define MT7620_GPIO_MODE_PCM_GPIO 0x4
70#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
71#define MT7620_GPIO_MODE_GPIO_I2S 0x6
72#define MT7620_GPIO_MODE_GPIO 0x7
73#define MT7620_GPIO_MODE_UART1 BIT(5)
74#define MT7620_GPIO_MODE_MDIO BIT(8)
75#define MT7620_GPIO_MODE_RGMII1 BIT(9)
76#define MT7620_GPIO_MODE_RGMII2 BIT(10)
77#define MT7620_GPIO_MODE_SPI BIT(11)
78#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12)
79#define MT7620_GPIO_MODE_WLED BIT(13)
80#define MT7620_GPIO_MODE_JTAG BIT(15)
81#define MT7620_GPIO_MODE_EPHY BIT(15)
82#define MT7620_GPIO_MODE_WDT BIT(22)
83
84#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644
index 000000000000..03ad716acb42
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -0,0 +1,53 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Parts of this file are based on Ralink's 2.6.21 BSP
7 *
8 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
9 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
10 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
11 */
12
13#ifndef _RT288X_REGS_H_
14#define _RT288X_REGS_H_
15
16#define RT2880_SYSC_BASE 0x00300000
17
18#define SYSC_REG_CHIP_NAME0 0x00
19#define SYSC_REG_CHIP_NAME1 0x04
20#define SYSC_REG_CHIP_ID 0x0c
21#define SYSC_REG_SYSTEM_CONFIG 0x10
22#define SYSC_REG_CLKCFG 0x30
23
24#define RT2880_CHIP_NAME0 0x38325452
25#define RT2880_CHIP_NAME1 0x20203038
26
27#define CHIP_ID_ID_MASK 0xff
28#define CHIP_ID_ID_SHIFT 8
29#define CHIP_ID_REV_MASK 0xff
30
31#define SYSTEM_CONFIG_CPUCLK_SHIFT 20
32#define SYSTEM_CONFIG_CPUCLK_MASK 0x3
33#define SYSTEM_CONFIG_CPUCLK_250 0x0
34#define SYSTEM_CONFIG_CPUCLK_266 0x1
35#define SYSTEM_CONFIG_CPUCLK_280 0x2
36#define SYSTEM_CONFIG_CPUCLK_300 0x3
37
38#define RT2880_GPIO_MODE_I2C BIT(0)
39#define RT2880_GPIO_MODE_UART0 BIT(1)
40#define RT2880_GPIO_MODE_SPI BIT(2)
41#define RT2880_GPIO_MODE_UART1 BIT(3)
42#define RT2880_GPIO_MODE_JTAG BIT(4)
43#define RT2880_GPIO_MODE_MDIO BIT(5)
44#define RT2880_GPIO_MODE_SDRAM BIT(6)
45#define RT2880_GPIO_MODE_PCI BIT(7)
46
47#define CLKCFG_SRAM_CS_N_WDT BIT(9)
48
49#define RT2880_SDRAM_BASE 0x08000000
50#define RT2880_MEM_SIZE_MIN 2
51#define RT2880_MEM_SIZE_MAX 128
52
53#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644
index 000000000000..72fc10669199
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
1/*
2 * Ralink RT288x specific CPU feature overrides
3 *
4 * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 *
7 * This file was derived from: include/asm-mips/cpu-features.h
8 * Copyright (C) 2003, 2004 Ralf Baechle
9 * Copyright (C) 2004 Maciej W. Rozycki
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation.
14 *
15 */
16#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
17#define _RT288X_CPU_FEATURE_OVERRIDES_H
18
19#define cpu_has_tlb 1
20#define cpu_has_4kex 1
21#define cpu_has_3k_cache 0
22#define cpu_has_4k_cache 1
23#define cpu_has_tx39_cache 0
24#define cpu_has_sb1_cache 0
25#define cpu_has_fpu 0
26#define cpu_has_32fpr 0
27#define cpu_has_counter 1
28#define cpu_has_watch 1
29#define cpu_has_divec 1
30
31#define cpu_has_prefetch 1
32#define cpu_has_ejtag 1
33#define cpu_has_llsc 1
34
35#define cpu_has_mips16 1
36#define cpu_has_mdmx 0
37#define cpu_has_mips3d 0
38#define cpu_has_smartmips 0
39
40#define cpu_has_mips32r1 1
41#define cpu_has_mips32r2 1
42#define cpu_has_mips64r1 0
43#define cpu_has_mips64r2 0
44
45#define cpu_has_dsp 0
46#define cpu_has_mipsmt 0
47
48#define cpu_has_64bits 0
49#define cpu_has_64bit_zero_reg 0
50#define cpu_has_64bit_gp_regs 0
51#define cpu_has_64bit_addresses 0
52
53#define cpu_dcache_line_size() 16
54#define cpu_icache_line_size() 16
55
56#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 7d344f2d7d0a..069bf37a6010 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void)
97#define RT5350_SYSCFG0_CPUCLK_320 0x2 97#define RT5350_SYSCFG0_CPUCLK_320 0x2
98#define RT5350_SYSCFG0_CPUCLK_300 0x3 98#define RT5350_SYSCFG0_CPUCLK_300 0x3
99 99
100#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT 12
101#define RT5350_SYSCFG0_DRAM_SIZE_MASK 7
102#define RT5350_SYSCFG0_DRAM_SIZE_2M 0
103#define RT5350_SYSCFG0_DRAM_SIZE_8M 1
104#define RT5350_SYSCFG0_DRAM_SIZE_16M 2
105#define RT5350_SYSCFG0_DRAM_SIZE_32M 3
106#define RT5350_SYSCFG0_DRAM_SIZE_64M 4
107
100/* multi function gpio pins */ 108/* multi function gpio pins */
101#define RT305X_GPIO_I2C_SD 1 109#define RT305X_GPIO_I2C_SD 1
102#define RT305X_GPIO_I2C_SCLK 2 110#define RT305X_GPIO_I2C_SCLK 2
@@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void)
136#define RT305X_GPIO_MODE_SDRAM BIT(8) 144#define RT305X_GPIO_MODE_SDRAM BIT(8)
137#define RT305X_GPIO_MODE_RGMII BIT(9) 145#define RT305X_GPIO_MODE_RGMII BIT(9)
138 146
147#define RT3352_SYSC_REG_SYSCFG0 0x010
148#define RT3352_SYSC_REG_SYSCFG1 0x014
149#define RT3352_SYSC_REG_CLKCFG1 0x030
150#define RT3352_SYSC_REG_RSTCTRL 0x034
151#define RT3352_SYSC_REG_USB_PS 0x05c
152
153#define RT3352_CLKCFG0_XTAL_SEL BIT(20)
154#define RT3352_CLKCFG1_UPHY0_CLK_EN BIT(18)
155#define RT3352_CLKCFG1_UPHY1_CLK_EN BIT(20)
156#define RT3352_RSTCTRL_UHST BIT(22)
157#define RT3352_RSTCTRL_UDEV BIT(25)
158#define RT3352_SYSCFG1_USB0_HOST_MODE BIT(10)
159
160#define RT305X_SDRAM_BASE 0x00000000
161#define RT305X_MEM_SIZE_MIN 2
162#define RT305X_MEM_SIZE_MAX 64
163#define RT3352_MEM_SIZE_MIN 2
164#define RT3352_MEM_SIZE_MAX 256
165
139#endif 166#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644
index 000000000000..917c28654552
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
1/*
2 * Ralink RT305x specific CPU feature overrides
3 *
4 * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 *
7 * This file was derived from: include/asm-mips/cpu-features.h
8 * Copyright (C) 2003, 2004 Ralf Baechle
9 * Copyright (C) 2004 Maciej W. Rozycki
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation.
14 *
15 */
16#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
17#define _RT305X_CPU_FEATURE_OVERRIDES_H
18
19#define cpu_has_tlb 1
20#define cpu_has_4kex 1
21#define cpu_has_3k_cache 0
22#define cpu_has_4k_cache 1
23#define cpu_has_tx39_cache 0
24#define cpu_has_sb1_cache 0
25#define cpu_has_fpu 0
26#define cpu_has_32fpr 0
27#define cpu_has_counter 1
28#define cpu_has_watch 1
29#define cpu_has_divec 1
30
31#define cpu_has_prefetch 1
32#define cpu_has_ejtag 1
33#define cpu_has_llsc 1
34
35#define cpu_has_mips16 1
36#define cpu_has_mdmx 0
37#define cpu_has_mips3d 0
38#define cpu_has_smartmips 0
39
40#define cpu_has_mips32r1 1
41#define cpu_has_mips32r2 1
42#define cpu_has_mips64r1 0
43#define cpu_has_mips64r2 0
44
45#define cpu_has_dsp 1
46#define cpu_has_mipsmt 0
47
48#define cpu_has_64bits 0
49#define cpu_has_64bit_zero_reg 0
50#define cpu_has_64bit_gp_regs 0
51#define cpu_has_64bit_addresses 0
52
53#define cpu_dcache_line_size() 32
54#define cpu_icache_line_size() 32
55
56#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644
index 000000000000..058382f37f92
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -0,0 +1,252 @@
1/*
2 * Ralink RT3662/RT3883 SoC register definitions
3 *
4 * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11#ifndef _RT3883_REGS_H_
12#define _RT3883_REGS_H_
13
14#include <linux/bitops.h>
15
16#define RT3883_SDRAM_BASE 0x00000000
17#define RT3883_SYSC_BASE 0x10000000
18#define RT3883_TIMER_BASE 0x10000100
19#define RT3883_INTC_BASE 0x10000200
20#define RT3883_MEMC_BASE 0x10000300
21#define RT3883_UART0_BASE 0x10000500
22#define RT3883_PIO_BASE 0x10000600
23#define RT3883_FSCC_BASE 0x10000700
24#define RT3883_NANDC_BASE 0x10000810
25#define RT3883_I2C_BASE 0x10000900
26#define RT3883_I2S_BASE 0x10000a00
27#define RT3883_SPI_BASE 0x10000b00
28#define RT3883_UART1_BASE 0x10000c00
29#define RT3883_PCM_BASE 0x10002000
30#define RT3883_GDMA_BASE 0x10002800
31#define RT3883_CODEC1_BASE 0x10003000
32#define RT3883_CODEC2_BASE 0x10003800
33#define RT3883_FE_BASE 0x10100000
34#define RT3883_ROM_BASE 0x10118000
35#define RT3883_USBDEV_BASE 0x10112000
36#define RT3883_PCI_BASE 0x10140000
37#define RT3883_WLAN_BASE 0x10180000
38#define RT3883_USBHOST_BASE 0x101c0000
39#define RT3883_BOOT_BASE 0x1c000000
40#define RT3883_SRAM_BASE 0x1e000000
41#define RT3883_PCIMEM_BASE 0x20000000
42
43#define RT3883_EHCI_BASE (RT3883_USBHOST_BASE)
44#define RT3883_OHCI_BASE (RT3883_USBHOST_BASE + 0x1000)
45
46#define RT3883_SYSC_SIZE 0x100
47#define RT3883_TIMER_SIZE 0x100
48#define RT3883_INTC_SIZE 0x100
49#define RT3883_MEMC_SIZE 0x100
50#define RT3883_UART0_SIZE 0x100
51#define RT3883_UART1_SIZE 0x100
52#define RT3883_PIO_SIZE 0x100
53#define RT3883_FSCC_SIZE 0x100
54#define RT3883_NANDC_SIZE 0x0f0
55#define RT3883_I2C_SIZE 0x100
56#define RT3883_I2S_SIZE 0x100
57#define RT3883_SPI_SIZE 0x100
58#define RT3883_PCM_SIZE 0x800
59#define RT3883_GDMA_SIZE 0x800
60#define RT3883_CODEC1_SIZE 0x800
61#define RT3883_CODEC2_SIZE 0x800
62#define RT3883_FE_SIZE 0x10000
63#define RT3883_ROM_SIZE 0x4000
64#define RT3883_USBDEV_SIZE 0x4000
65#define RT3883_PCI_SIZE 0x40000
66#define RT3883_WLAN_SIZE 0x40000
67#define RT3883_USBHOST_SIZE 0x40000
68#define RT3883_BOOT_SIZE (32 * 1024 * 1024)
69#define RT3883_SRAM_SIZE (32 * 1024 * 1024)
70
71/* SYSC registers */
72#define RT3883_SYSC_REG_CHIPID0_3 0x00 /* Chip ID 0 */
73#define RT3883_SYSC_REG_CHIPID4_7 0x04 /* Chip ID 1 */
74#define RT3883_SYSC_REG_REVID 0x0c /* Chip Revision Identification */
75#define RT3883_SYSC_REG_SYSCFG0 0x10 /* System Configuration 0 */
76#define RT3883_SYSC_REG_SYSCFG1 0x14 /* System Configuration 1 */
77#define RT3883_SYSC_REG_CLKCFG0 0x2c /* Clock Configuration 0 */
78#define RT3883_SYSC_REG_CLKCFG1 0x30 /* Clock Configuration 1 */
79#define RT3883_SYSC_REG_RSTCTRL 0x34 /* Reset Control*/
80#define RT3883_SYSC_REG_RSTSTAT 0x38 /* Reset Status*/
81#define RT3883_SYSC_REG_USB_PS 0x5c /* USB Power saving control */
82#define RT3883_SYSC_REG_GPIO_MODE 0x60 /* GPIO Purpose Select */
83#define RT3883_SYSC_REG_PCIE_CLK_GEN0 0x7c
84#define RT3883_SYSC_REG_PCIE_CLK_GEN1 0x80
85#define RT3883_SYSC_REG_PCIE_CLK_GEN2 0x84
86#define RT3883_SYSC_REG_PMU 0x88
87#define RT3883_SYSC_REG_PMU1 0x8c
88
89#define RT3883_CHIP_NAME0 0x38335452
90#define RT3883_CHIP_NAME1 0x20203338
91
92#define RT3883_REVID_VER_ID_MASK 0x0f
93#define RT3883_REVID_VER_ID_SHIFT 8
94#define RT3883_REVID_ECO_ID_MASK 0x0f
95
96#define RT3883_SYSCFG0_DRAM_TYPE_DDR2 BIT(17)
97#define RT3883_SYSCFG0_CPUCLK_SHIFT 8
98#define RT3883_SYSCFG0_CPUCLK_MASK 0x3
99#define RT3883_SYSCFG0_CPUCLK_250 0x0
100#define RT3883_SYSCFG0_CPUCLK_384 0x1
101#define RT3883_SYSCFG0_CPUCLK_480 0x2
102#define RT3883_SYSCFG0_CPUCLK_500 0x3
103
104#define RT3883_SYSCFG1_USB0_HOST_MODE BIT(10)
105#define RT3883_SYSCFG1_PCIE_RC_MODE BIT(8)
106#define RT3883_SYSCFG1_PCI_HOST_MODE BIT(7)
107#define RT3883_SYSCFG1_PCI_66M_MODE BIT(6)
108#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT BIT(2)
109
110#define RT3883_CLKCFG1_PCIE_CLK_EN BIT(21)
111#define RT3883_CLKCFG1_UPHY1_CLK_EN BIT(20)
112#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
113#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
114
115#define RT3883_GPIO_MODE_I2C BIT(0)
116#define RT3883_GPIO_MODE_SPI BIT(1)
117#define RT3883_GPIO_MODE_UART0_SHIFT 2
118#define RT3883_GPIO_MODE_UART0_MASK 0x7
119#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
120#define RT3883_GPIO_MODE_UARTF 0x0
121#define RT3883_GPIO_MODE_PCM_UARTF 0x1
122#define RT3883_GPIO_MODE_PCM_I2S 0x2
123#define RT3883_GPIO_MODE_I2S_UARTF 0x3
124#define RT3883_GPIO_MODE_PCM_GPIO 0x4
125#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
126#define RT3883_GPIO_MODE_GPIO_I2S 0x6
127#define RT3883_GPIO_MODE_GPIO 0x7
128#define RT3883_GPIO_MODE_UART1 BIT(5)
129#define RT3883_GPIO_MODE_JTAG BIT(6)
130#define RT3883_GPIO_MODE_MDIO BIT(7)
131#define RT3883_GPIO_MODE_GE1 BIT(9)
132#define RT3883_GPIO_MODE_GE2 BIT(10)
133#define RT3883_GPIO_MODE_PCI_SHIFT 11
134#define RT3883_GPIO_MODE_PCI_MASK 0x7
135#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
136#define RT3883_GPIO_MODE_LNA_A_SHIFT 16
137#define RT3883_GPIO_MODE_LNA_A_MASK 0x3
138#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
139#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3
140#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
141#define RT3883_GPIO_MODE_LNA_G_SHIFT 18
142#define RT3883_GPIO_MODE_LNA_G_MASK 0x3
143#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
144#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
145#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
146
147#define RT3883_GPIO_I2C_SD 1
148#define RT3883_GPIO_I2C_SCLK 2
149#define RT3883_GPIO_SPI_CS0 3
150#define RT3883_GPIO_SPI_CLK 4
151#define RT3883_GPIO_SPI_MOSI 5
152#define RT3883_GPIO_SPI_MISO 6
153#define RT3883_GPIO_7 7
154#define RT3883_GPIO_10 10
155#define RT3883_GPIO_11 11
156#define RT3883_GPIO_14 14
157#define RT3883_GPIO_UART1_TXD 15
158#define RT3883_GPIO_UART1_RXD 16
159#define RT3883_GPIO_JTAG_TDO 17
160#define RT3883_GPIO_JTAG_TDI 18
161#define RT3883_GPIO_JTAG_TMS 19
162#define RT3883_GPIO_JTAG_TCLK 20
163#define RT3883_GPIO_JTAG_TRST_N 21
164#define RT3883_GPIO_MDIO_MDC 22
165#define RT3883_GPIO_MDIO_MDIO 23
166#define RT3883_GPIO_LNA_PE_A0 32
167#define RT3883_GPIO_LNA_PE_A1 33
168#define RT3883_GPIO_LNA_PE_A2 34
169#define RT3883_GPIO_LNA_PE_G0 35
170#define RT3883_GPIO_LNA_PE_G1 36
171#define RT3883_GPIO_LNA_PE_G2 37
172#define RT3883_GPIO_PCI_AD0 40
173#define RT3883_GPIO_PCI_AD31 71
174#define RT3883_GPIO_GE2_TXD0 72
175#define RT3883_GPIO_GE2_TXD1 73
176#define RT3883_GPIO_GE2_TXD2 74
177#define RT3883_GPIO_GE2_TXD3 75
178#define RT3883_GPIO_GE2_TXEN 76
179#define RT3883_GPIO_GE2_TXCLK 77
180#define RT3883_GPIO_GE2_RXD0 78
181#define RT3883_GPIO_GE2_RXD1 79
182#define RT3883_GPIO_GE2_RXD2 80
183#define RT3883_GPIO_GE2_RXD3 81
184#define RT3883_GPIO_GE2_RXDV 82
185#define RT3883_GPIO_GE2_RXCLK 83
186#define RT3883_GPIO_GE1_TXD0 84
187#define RT3883_GPIO_GE1_TXD1 85
188#define RT3883_GPIO_GE1_TXD2 86
189#define RT3883_GPIO_GE1_TXD3 87
190#define RT3883_GPIO_GE1_TXEN 88
191#define RT3883_GPIO_GE1_TXCLK 89
192#define RT3883_GPIO_GE1_RXD0 90
193#define RT3883_GPIO_GE1_RXD1 91
194#define RT3883_GPIO_GE1_RXD2 92
195#define RT3883_GPIO_GE1_RXD3 93
196#define RT3883_GPIO_GE1_RXDV 94
197#define RT3883_GPIO_GE1_RXCLK 95
198
199#define RT3883_RSTCTRL_PCIE_PCI_PDM BIT(27)
200#define RT3883_RSTCTRL_FLASH BIT(26)
201#define RT3883_RSTCTRL_UDEV BIT(25)
202#define RT3883_RSTCTRL_PCI BIT(24)
203#define RT3883_RSTCTRL_PCIE BIT(23)
204#define RT3883_RSTCTRL_UHST BIT(22)
205#define RT3883_RSTCTRL_FE BIT(21)
206#define RT3883_RSTCTRL_WLAN BIT(20)
207#define RT3883_RSTCTRL_UART1 BIT(29)
208#define RT3883_RSTCTRL_SPI BIT(18)
209#define RT3883_RSTCTRL_I2S BIT(17)
210#define RT3883_RSTCTRL_I2C BIT(16)
211#define RT3883_RSTCTRL_NAND BIT(15)
212#define RT3883_RSTCTRL_DMA BIT(14)
213#define RT3883_RSTCTRL_PIO BIT(13)
214#define RT3883_RSTCTRL_UART BIT(12)
215#define RT3883_RSTCTRL_PCM BIT(11)
216#define RT3883_RSTCTRL_MC BIT(10)
217#define RT3883_RSTCTRL_INTC BIT(9)
218#define RT3883_RSTCTRL_TIMER BIT(8)
219#define RT3883_RSTCTRL_SYS BIT(0)
220
221#define RT3883_INTC_INT_SYSCTL BIT(0)
222#define RT3883_INTC_INT_TIMER0 BIT(1)
223#define RT3883_INTC_INT_TIMER1 BIT(2)
224#define RT3883_INTC_INT_IA BIT(3)
225#define RT3883_INTC_INT_PCM BIT(4)
226#define RT3883_INTC_INT_UART0 BIT(5)
227#define RT3883_INTC_INT_PIO BIT(6)
228#define RT3883_INTC_INT_DMA BIT(7)
229#define RT3883_INTC_INT_NAND BIT(8)
230#define RT3883_INTC_INT_PERFC BIT(9)
231#define RT3883_INTC_INT_I2S BIT(10)
232#define RT3883_INTC_INT_UART1 BIT(12)
233#define RT3883_INTC_INT_UHST BIT(18)
234#define RT3883_INTC_INT_UDEV BIT(19)
235
236/* FLASH/SRAM/Codec Controller registers */
237#define RT3883_FSCC_REG_FLASH_CFG0 0x00
238#define RT3883_FSCC_REG_FLASH_CFG1 0x04
239#define RT3883_FSCC_REG_CODEC_CFG0 0x40
240#define RT3883_FSCC_REG_CODEC_CFG1 0x44
241
242#define RT3883_FLASH_CFG_WIDTH_SHIFT 26
243#define RT3883_FLASH_CFG_WIDTH_MASK 0x3
244#define RT3883_FLASH_CFG_WIDTH_8BIT 0x0
245#define RT3883_FLASH_CFG_WIDTH_16BIT 0x1
246#define RT3883_FLASH_CFG_WIDTH_32BIT 0x2
247
248#define RT3883_SDRAM_BASE 0x00000000
249#define RT3883_MEM_SIZE_MIN 2
250#define RT3883_MEM_SIZE_MAX 256
251
252#endif /* _RT3883_REGS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644
index 000000000000..181fbf4c976f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
@@ -0,0 +1,55 @@
1/*
2 * Ralink RT3662/RT3883 specific CPU feature overrides
3 *
4 * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
5 *
6 * This file was derived from: include/asm-mips/cpu-features.h
7 * Copyright (C) 2003, 2004 Ralf Baechle
8 * Copyright (C) 2004 Maciej W. Rozycki
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 *
14 */
15#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
16#define _RT3883_CPU_FEATURE_OVERRIDES_H
17
18#define cpu_has_tlb 1
19#define cpu_has_4kex 1
20#define cpu_has_3k_cache 0
21#define cpu_has_4k_cache 1
22#define cpu_has_tx39_cache 0
23#define cpu_has_sb1_cache 0
24#define cpu_has_fpu 0
25#define cpu_has_32fpr 0
26#define cpu_has_counter 1
27#define cpu_has_watch 1
28#define cpu_has_divec 1
29
30#define cpu_has_prefetch 1
31#define cpu_has_ejtag 1
32#define cpu_has_llsc 1
33
34#define cpu_has_mips16 1
35#define cpu_has_mdmx 0
36#define cpu_has_mips3d 0
37#define cpu_has_smartmips 0
38
39#define cpu_has_mips32r1 1
40#define cpu_has_mips32r2 1
41#define cpu_has_mips64r1 0
42#define cpu_has_mips64r2 0
43
44#define cpu_has_dsp 1
45#define cpu_has_mipsmt 0
46
47#define cpu_has_64bits 0
48#define cpu_has_64bit_zero_reg 0
49#define cpu_has_64bit_gp_regs 0
50#define cpu_has_64bit_addresses 0
51
52#define cpu_dcache_line_size() 32
53#define cpu_icache_line_size() 32
54
55#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 193c0912d38e..bfbd7035d4c5 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,7 +28,11 @@
28/* #define cpu_has_prefetch ? */ 28/* #define cpu_has_prefetch ? */
29#define cpu_has_mcheck 1 29#define cpu_has_mcheck 1
30/* #define cpu_has_ejtag ? */ 30/* #define cpu_has_ejtag ? */
31#ifdef CONFIG_CPU_MICROMIPS
32#define cpu_has_llsc 0
33#else
31#define cpu_has_llsc 1 34#define cpu_has_llsc 1
35#endif
32/* #define cpu_has_vtag_icache ? */ 36/* #define cpu_has_vtag_icache ? */
33/* #define cpu_has_dc_aliases ? */ 37/* #define cpu_has_dc_aliases ? */
34/* #define cpu_has_ic_fills_f_dc ? */ 38/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 44a09a64160a..bd9746fbe4af 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -83,4 +83,7 @@ extern void mips_pcibios_init(void);
83#define mips_pcibios_init() do { } while (0) 83#define mips_pcibios_init() do { } while (0)
84#endif 84#endif
85 85
86extern void mips_scroll_message(void);
87extern void mips_display_message(const char *str);
88
86#endif /* __ASM_MIPS_BOARDS_GENERIC_H */ 89#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
deleted file mode 100644
index e7aed3e4ff58..000000000000
--- a/arch/mips/include/asm/mips-boards/prom.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Carsten Langgaard, carstenl@mips.com
3 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
4 *
5 * ########################################################################
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 *
20 * ########################################################################
21 *
22 * MIPS boards bootprom interface for the Linux kernel.
23 *
24 */
25
26#ifndef _MIPS_PROM_H
27#define _MIPS_PROM_H
28
29extern char *prom_getcmdline(void);
30extern char *prom_getenv(char *name);
31extern void prom_init_cmdline(void);
32extern void prom_meminit(void);
33extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
34extern void mips_display_message(const char *str);
35extern void mips_display_word(unsigned int num);
36extern void mips_scroll_message(void);
37extern int get_ethernet_addr(char *ethernet_addr);
38
39/* Memory descriptor management. */
40#define PROM_MAX_PMEMBLOCKS 32
41struct prom_pmemblock {
42 unsigned long base; /* Within KSEG0. */
43 unsigned int size; /* In bytes. */
44 unsigned int type; /* free or prom memory */
45};
46
47#endif /* !(_MIPS_PROM_H) */
diff --git a/arch/mips/include/asm/mips_machine.h b/arch/mips/include/asm/mips_machine.h
index 363bb352c7f7..9d00aebe9842 100644
--- a/arch/mips/include/asm/mips_machine.h
+++ b/arch/mips/include/asm/mips_machine.h
@@ -42,13 +42,9 @@ extern long __mips_machines_end;
42#ifdef CONFIG_MIPS_MACHINE 42#ifdef CONFIG_MIPS_MACHINE
43int mips_machtype_setup(char *id) __init; 43int mips_machtype_setup(char *id) __init;
44void mips_machine_setup(void) __init; 44void mips_machine_setup(void) __init;
45void mips_set_machine_name(const char *name) __init;
46char *mips_get_machine_name(void);
47#else 45#else
48static inline int mips_machtype_setup(char *id) { return 1; } 46static inline int mips_machtype_setup(char *id) { return 1; }
49static inline void mips_machine_setup(void) { } 47static inline void mips_machine_setup(void) { }
50static inline void mips_set_machine_name(const char *name) { }
51static inline char *mips_get_machine_name(void) { return NULL; }
52#endif /* CONFIG_MIPS_MACHINE */ 48#endif /* CONFIG_MIPS_MACHINE */
53 49
54#endif /* __ASM_MIPS_MACHINE_H */ 50#endif /* __ASM_MIPS_MACHINE_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0da44d422f5b..87e6207b05e4 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -596,6 +596,7 @@
596#define MIPS_CONF3_RXI (_ULCAST_(1) << 12) 596#define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
597#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) 597#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
598#define MIPS_CONF3_ISA (_ULCAST_(3) << 14) 598#define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
599#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16)
599#define MIPS_CONF3_VZ (_ULCAST_(1) << 23) 600#define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
600 601
601#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) 602#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
@@ -623,6 +624,24 @@
623#ifndef __ASSEMBLY__ 624#ifndef __ASSEMBLY__
624 625
625/* 626/*
627 * Macros for handling the ISA mode bit for microMIPS.
628 */
629#define get_isa16_mode(x) ((x) & 0x1)
630#define msk_isa16_mode(x) ((x) & ~0x1)
631#define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
632
633/*
634 * microMIPS instructions can be 16-bit or 32-bit in length. This
635 * returns a 1 if the instruction is 16-bit and a 0 if 32-bit.
636 */
637static inline int mm_insn_16bit(u16 insn)
638{
639 u16 opcode = (insn >> 10) & 0x7;
640
641 return (opcode >= 1 && opcode <= 3) ? 1 : 0;
642}
643
644/*
626 * Functions to access the R10000 performance counters. These are basically 645 * Functions to access the R10000 performance counters. These are basically
627 * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit 646 * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
628 * performance counter number encoded into bits 1 ... 5 of the instruction. 647 * performance counter number encoded into bits 1 ... 5 of the instruction.
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e81d719efcd1..1554721e4808 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -26,10 +26,15 @@
26 26
27#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 27#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
28 28
29#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ 29#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
30 tlbmiss_handler_setup_pgd((unsigned long)(pgd)) 30do { \
31 31 void (*tlbmiss_handler_setup_pgd)(unsigned long); \
32extern void tlbmiss_handler_setup_pgd(unsigned long pgd); 32 extern u32 tlbmiss_handler_setup_pgd_array[16]; \
33 \
34 tlbmiss_handler_setup_pgd = \
35 (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
36 tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \
37} while (0)
33 38
34#define TLBMISS_HANDLER_SETUP() \ 39#define TLBMISS_HANDLER_SETUP() \
35 do { \ 40 do { \
@@ -62,59 +67,88 @@ extern unsigned long pgd_current[];
62 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 67 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
63#endif 68#endif
64#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ 69#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
65#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
66
67#define ASID_INC 0x40
68#define ASID_MASK 0xfc0
69
70#elif defined(CONFIG_CPU_R8000)
71
72#define ASID_INC 0x10
73#define ASID_MASK 0xff0
74 70
75#elif defined(CONFIG_MIPS_MT_SMTC) 71#define ASID_INC(asid) \
76 72({ \
77#define ASID_INC 0x1 73 unsigned long __asid = asid; \
78extern unsigned long smtc_asid_mask; 74 __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \
79#define ASID_MASK (smtc_asid_mask) 75 ".section\t__asid_inc,\"a\"\n\t" \
80#define HW_ASID_MASK 0xff 76 ".word\t1b\n\t" \
81/* End SMTC/34K debug hack */ 77 ".previous" \
82#else /* FIXME: not correct for R6000 */ 78 :"=r" (__asid) \
83 79 :"0" (__asid)); \
84#define ASID_INC 0x1 80 __asid; \
85#define ASID_MASK 0xff 81})
82#define ASID_MASK(asid) \
83({ \
84 unsigned long __asid = asid; \
85 __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \
86 ".section\t__asid_mask,\"a\"\n\t" \
87 ".word\t1b\n\t" \
88 ".previous" \
89 :"=r" (__asid) \
90 :"r" (__asid)); \
91 __asid; \
92})
93#define ASID_VERSION_MASK \
94({ \
95 unsigned long __asid; \
96 __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \
97 ".section\t__asid_version_mask,\"a\"\n\t" \
98 ".word\t1b\n\t" \
99 ".previous" \
100 :"=r" (__asid)); \
101 __asid; \
102})
103#define ASID_FIRST_VERSION \
104({ \
105 unsigned long __asid = asid; \
106 __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \
107 ".section\t__asid_first_version,\"a\"\n\t" \
108 ".word\t1b\n\t" \
109 ".previous" \
110 :"=r" (__asid)); \
111 __asid; \
112})
113
114#define ASID_FIRST_VERSION_R3000 0x1000
115#define ASID_FIRST_VERSION_R4000 0x100
116#define ASID_FIRST_VERSION_R8000 0x1000
117#define ASID_FIRST_VERSION_RM9000 0x1000
86 118
119#ifdef CONFIG_MIPS_MT_SMTC
120#define SMTC_HW_ASID_MASK 0xff
121extern unsigned int smtc_asid_mask;
87#endif 122#endif
88 123
89#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 124#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
90#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) 125#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm)))
91#define asid_cache(cpu) (cpu_data[cpu].asid_cache) 126#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
92 127
93static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 128static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
94{ 129{
95} 130}
96 131
97/*
98 * All unused by hardware upper bits will be considered
99 * as a software asid extension.
100 */
101#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
102#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
103
104#ifndef CONFIG_MIPS_MT_SMTC 132#ifndef CONFIG_MIPS_MT_SMTC
105/* Normal, classic MIPS get_new_mmu_context */ 133/* Normal, classic MIPS get_new_mmu_context */
106static inline void 134static inline void
107get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) 135get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
108{ 136{
137 extern void kvm_local_flush_tlb_all(void);
109 unsigned long asid = asid_cache(cpu); 138 unsigned long asid = asid_cache(cpu);
110 139
111 if (! ((asid += ASID_INC) & ASID_MASK) ) { 140 if (!ASID_MASK((asid = ASID_INC(asid)))) {
112 if (cpu_has_vtag_icache) 141 if (cpu_has_vtag_icache)
113 flush_icache_all(); 142 flush_icache_all();
143#ifdef CONFIG_VIRTUALIZATION
144 kvm_local_flush_tlb_all(); /* start new asid cycle */
145#else
114 local_flush_tlb_all(); /* start new asid cycle */ 146 local_flush_tlb_all(); /* start new asid cycle */
147#endif
115 if (!asid) /* fix version if needed */ 148 if (!asid) /* fix version if needed */
116 asid = ASID_FIRST_VERSION; 149 asid = ASID_FIRST_VERSION;
117 } 150 }
151
118 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 152 cpu_context(cpu, mm) = asid_cache(cpu) = asid;
119} 153}
120 154
@@ -133,7 +167,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
133{ 167{
134 int i; 168 int i;
135 169
136 for_each_online_cpu(i) 170 for_each_possible_cpu(i)
137 cpu_context(i, mm) = 0; 171 cpu_context(i, mm) = 0;
138 172
139 return 0; 173 return 0;
@@ -166,7 +200,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
166 * free up the ASID value for use and flush any old 200 * free up the ASID value for use and flush any old
167 * instances of it from the TLB. 201 * instances of it from the TLB.
168 */ 202 */
169 oldasid = (read_c0_entryhi() & ASID_MASK); 203 oldasid = ASID_MASK(read_c0_entryhi());
170 if(smtc_live_asid[mytlb][oldasid]) { 204 if(smtc_live_asid[mytlb][oldasid]) {
171 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 205 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
172 if(smtc_live_asid[mytlb][oldasid] == 0) 206 if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -177,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
177 * having ASID_MASK smaller than the hardware maximum, 211 * having ASID_MASK smaller than the hardware maximum,
178 * make sure no "soft" bits become "hard"... 212 * make sure no "soft" bits become "hard"...
179 */ 213 */
180 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 214 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
181 cpu_asid(cpu, next)); 215 cpu_asid(cpu, next));
182 ehb(); /* Make sure it propagates to TCStatus */ 216 ehb(); /* Make sure it propagates to TCStatus */
183 evpe(mtflags); 217 evpe(mtflags);
@@ -230,15 +264,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
230#ifdef CONFIG_MIPS_MT_SMTC 264#ifdef CONFIG_MIPS_MT_SMTC
231 /* See comments for similar code above */ 265 /* See comments for similar code above */
232 mtflags = dvpe(); 266 mtflags = dvpe();
233 oldasid = read_c0_entryhi() & ASID_MASK; 267 oldasid = ASID_MASK(read_c0_entryhi());
234 if(smtc_live_asid[mytlb][oldasid]) { 268 if(smtc_live_asid[mytlb][oldasid]) {
235 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 269 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
236 if(smtc_live_asid[mytlb][oldasid] == 0) 270 if(smtc_live_asid[mytlb][oldasid] == 0)
237 smtc_flush_tlb_asid(oldasid); 271 smtc_flush_tlb_asid(oldasid);
238 } 272 }
239 /* See comments for similar code above */ 273 /* See comments for similar code above */
240 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | 274 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
241 cpu_asid(cpu, next)); 275 cpu_asid(cpu, next));
242 ehb(); /* Make sure it propagates to TCStatus */ 276 ehb(); /* Make sure it propagates to TCStatus */
243 evpe(mtflags); 277 evpe(mtflags);
244#else 278#else
@@ -275,14 +309,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
275#ifdef CONFIG_MIPS_MT_SMTC 309#ifdef CONFIG_MIPS_MT_SMTC
276 /* See comments for similar code above */ 310 /* See comments for similar code above */
277 prevvpe = dvpe(); 311 prevvpe = dvpe();
278 oldasid = (read_c0_entryhi() & ASID_MASK); 312 oldasid = ASID_MASK(read_c0_entryhi());
279 if (smtc_live_asid[mytlb][oldasid]) { 313 if (smtc_live_asid[mytlb][oldasid]) {
280 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 314 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
281 if(smtc_live_asid[mytlb][oldasid] == 0) 315 if(smtc_live_asid[mytlb][oldasid] == 0)
282 smtc_flush_tlb_asid(oldasid); 316 smtc_flush_tlb_asid(oldasid);
283 } 317 }
284 /* See comments for similar code above */ 318 /* See comments for similar code above */
285 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) 319 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
286 | cpu_asid(cpu, mm)); 320 | cpu_asid(cpu, mm));
287 ehb(); /* Make sure it propagates to TCStatus */ 321 ehb(); /* Make sure it propagates to TCStatus */
288 evpe(prevvpe); 322 evpe(prevvpe);
diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h
index 419d8aef8569..79c7cccdc22c 100644
--- a/arch/mips/include/asm/netlogic/haldefs.h
+++ b/arch/mips/include/asm/netlogic/haldefs.h
@@ -35,42 +35,13 @@
35#ifndef __NLM_HAL_HALDEFS_H__ 35#ifndef __NLM_HAL_HALDEFS_H__
36#define __NLM_HAL_HALDEFS_H__ 36#define __NLM_HAL_HALDEFS_H__
37 37
38#include <linux/irqflags.h> /* for local_irq_disable */
39
38/* 40/*
39 * This file contains platform specific memory mapped IO implementation 41 * This file contains platform specific memory mapped IO implementation
40 * and will provide a way to read 32/64 bit memory mapped registers in 42 * and will provide a way to read 32/64 bit memory mapped registers in
41 * all ABIs 43 * all ABIs
42 */ 44 */
43#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP)
44#error "o32 compile not supported on XLP yet"
45#endif
46/*
47 * For o32 compilation, we have to disable interrupts and enable KX bit to
48 * access 64 bit addresses or data.
49 *
50 * We need to disable interrupts because we save just the lower 32 bits of
51 * registers in interrupt handling. So if we get hit by an interrupt while
52 * using the upper 32 bits of a register, we lose.
53 */
54static inline uint32_t nlm_save_flags_kx(void)
55{
56 return change_c0_status(ST0_KX | ST0_IE, ST0_KX);
57}
58
59static inline uint32_t nlm_save_flags_cop2(void)
60{
61 return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2);
62}
63
64static inline void nlm_restore_flags(uint32_t sr)
65{
66 write_c0_status(sr);
67}
68
69/*
70 * The n64 implementations are simple, the o32 implementations when they
71 * are added, will have to disable interrupts and enable KX before doing
72 * 64 bit ops.
73 */
74static inline uint32_t 45static inline uint32_t
75nlm_read_reg(uint64_t base, uint32_t reg) 46nlm_read_reg(uint64_t base, uint32_t reg)
76{ 47{
@@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
87 *addr = val; 58 *addr = val;
88} 59}
89 60
61/*
62 * For o32 compilation, we have to disable interrupts to access 64 bit
63 * registers
64 *
65 * We need to disable interrupts because we save just the lower 32 bits of
66 * registers in interrupt handling. So if we get hit by an interrupt while
67 * using the upper 32 bits of a register, we lose.
68 */
69
90static inline uint64_t 70static inline uint64_t
91nlm_read_reg64(uint64_t base, uint32_t reg) 71nlm_read_reg64(uint64_t base, uint32_t reg)
92{ 72{
93 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); 73 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
94 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; 74 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
95 75 uint64_t val;
96 return *ptr; 76
77 if (sizeof(unsigned long) == 4) {
78 unsigned long flags;
79
80 local_irq_save(flags);
81 __asm__ __volatile__(
82 ".set push" "\n\t"
83 ".set mips64" "\n\t"
84 "ld %L0, %1" "\n\t"
85 "dsra32 %M0, %L0, 0" "\n\t"
86 "sll %L0, %L0, 0" "\n\t"
87 ".set pop" "\n"
88 : "=r" (val)
89 : "m" (*ptr));
90 local_irq_restore(flags);
91 } else
92 val = *ptr;
93
94 return val;
97} 95}
98 96
99static inline void 97static inline void
@@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
102 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); 100 uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
103 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; 101 volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
104 102
105 *ptr = val; 103 if (sizeof(unsigned long) == 4) {
104 unsigned long flags;
105 uint64_t tmp;
106
107 local_irq_save(flags);
108 __asm__ __volatile__(
109 ".set push" "\n\t"
110 ".set mips64" "\n\t"
111 "dsll32 %L0, %L0, 0" "\n\t"
112 "dsrl32 %L0, %L0, 0" "\n\t"
113 "dsll32 %M0, %M0, 0" "\n\t"
114 "or %L0, %L0, %M0" "\n\t"
115 "sd %L0, %2" "\n\t"
116 ".set pop" "\n"
117 : "=r" (tmp)
118 : "0" (val), "m" (*ptr));
119 local_irq_restore(flags);
120 } else
121 *ptr = val;
106} 122}
107 123
108/* 124/*
@@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset)
143 return nlm_io_base + devoffset; 159 return nlm_io_base + devoffset;
144} 160}
145 161
146static inline uint64_t
147nlm_xkphys_map_pcibar0(uint64_t pcibase)
148{
149 uint64_t paddr;
150
151 paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
152 return (uint64_t)0x9000000000000000 | paddr;
153}
154#elif defined(CONFIG_CPU_XLR) 162#elif defined(CONFIG_CPU_XLR)
155 163
156static inline uint64_t 164static inline uint64_t
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 8ad2e0f81719..f299d31d7c1a 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -38,21 +38,16 @@
38/* 38/*
39 * XLR and XLP interrupt request and interrupt mask registers 39 * XLR and XLP interrupt request and interrupt mask registers
40 */ 40 */
41#define read_c0_eirr() __read_64bit_c0_register($9, 6)
42#define read_c0_eimr() __read_64bit_c0_register($9, 7)
43#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val)
44
45/* 41/*
46 * Writing EIMR in 32 bit is a special case, the lower 8 bit of the 42 * NOTE: Do not save/restore flags around write_c0_eimr().
47 * EIMR is shadowed in the status register, so we cannot save and 43 * On non-R2 platforms the flags has part of EIMR that is shadowed in STATUS
48 * restore status register for split read. 44 * register. Restoring flags will overwrite the lower 8 bits of EIMR.
45 *
46 * Call with interrupts disabled.
49 */ 47 */
50#define write_c0_eimr(val) \ 48#define write_c0_eimr(val) \
51do { \ 49do { \
52 if (sizeof(unsigned long) == 4) { \ 50 if (sizeof(unsigned long) == 4) { \
53 unsigned long __flags; \
54 \
55 local_irq_save(__flags); \
56 __asm__ __volatile__( \ 51 __asm__ __volatile__( \
57 ".set\tmips64\n\t" \ 52 ".set\tmips64\n\t" \
58 "dsll\t%L0, %L0, 32\n\t" \ 53 "dsll\t%L0, %L0, 32\n\t" \
@@ -62,8 +57,6 @@ do { \
62 "dmtc0\t%L0, $9, 7\n\t" \ 57 "dmtc0\t%L0, $9, 7\n\t" \
63 ".set\tmips0" \ 58 ".set\tmips0" \
64 : : "r" (val)); \ 59 : : "r" (val)); \
65 __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
66 local_irq_restore(__flags); \
67 } else \ 60 } else \
68 __write_64bit_c0_register($9, 7, (val)); \ 61 __write_64bit_c0_register($9, 7, (val)); \
69} while (0) 62} while (0)
@@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
128 uint64_t val; 121 uint64_t val;
129 122
130#ifdef CONFIG_64BIT 123#ifdef CONFIG_64BIT
131 val = read_c0_eimr() & read_c0_eirr(); 124 val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
132#else 125#else
133 __asm__ __volatile__( 126 __asm__ __volatile__(
134 ".set push\n\t" 127 ".set push\n\t"
@@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
143 ".set pop" 136 ".set pop"
144 : "=r" (val)); 137 : "=r" (val));
145#endif 138#endif
146
147 return val; 139 return val;
148} 140}
149 141
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index 3df53017fe51..a981f4681a15 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -191,59 +191,6 @@
191#define PIC_IRT_PCIE_LINK_2_INDEX 80 191#define PIC_IRT_PCIE_LINK_2_INDEX 80
192#define PIC_IRT_PCIE_LINK_3_INDEX 81 192#define PIC_IRT_PCIE_LINK_3_INDEX 81
193#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) 193#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
194/* 78 to 81 */
195#define PIC_NUM_NA_IRTS 32
196/* 82 to 113 */
197#define PIC_IRT_NA_0_INDEX 82
198#define PIC_IRT_NA_INDEX(num) ((num) + PIC_IRT_NA_0_INDEX)
199#define PIC_IRT_POE_INDEX 114
200
201#define PIC_NUM_USB_IRTS 6
202#define PIC_IRT_USB_0_INDEX 115
203#define PIC_IRT_EHCI_0_INDEX 115
204#define PIC_IRT_OHCI_0_INDEX 116
205#define PIC_IRT_OHCI_1_INDEX 117
206#define PIC_IRT_EHCI_1_INDEX 118
207#define PIC_IRT_OHCI_2_INDEX 119
208#define PIC_IRT_OHCI_3_INDEX 120
209#define PIC_IRT_USB_INDEX(num) ((num) + PIC_IRT_USB_0_INDEX)
210/* 115 to 120 */
211#define PIC_IRT_GDX_INDEX 121
212#define PIC_IRT_SEC_INDEX 122
213#define PIC_IRT_RSA_INDEX 123
214
215#define PIC_NUM_COMP_IRTS 4
216#define PIC_IRT_COMP_0_INDEX 124
217#define PIC_IRT_COMP_INDEX(num) ((num) + PIC_IRT_COMP_0_INDEX)
218/* 124 to 127 */
219#define PIC_IRT_GBU_INDEX 128
220#define PIC_IRT_ICC_0_INDEX 129 /* ICC - Inter Chip Coherency */
221#define PIC_IRT_ICC_1_INDEX 130
222#define PIC_IRT_ICC_2_INDEX 131
223#define PIC_IRT_CAM_INDEX 132
224#define PIC_IRT_UART_0_INDEX 133
225#define PIC_IRT_UART_1_INDEX 134
226#define PIC_IRT_I2C_0_INDEX 135
227#define PIC_IRT_I2C_1_INDEX 136
228#define PIC_IRT_SYS_0_INDEX 137
229#define PIC_IRT_SYS_1_INDEX 138
230#define PIC_IRT_JTAG_INDEX 139
231#define PIC_IRT_PIC_INDEX 140
232#define PIC_IRT_NBU_INDEX 141
233#define PIC_IRT_TCU_INDEX 142
234#define PIC_IRT_GCU_INDEX 143 /* GBC - Global Coherency */
235#define PIC_IRT_DMC_0_INDEX 144
236#define PIC_IRT_DMC_1_INDEX 145
237
238#define PIC_NUM_GPIO_IRTS 4
239#define PIC_IRT_GPIO_0_INDEX 146
240#define PIC_IRT_GPIO_INDEX(num) ((num) + PIC_IRT_GPIO_0_INDEX)
241
242/* 146 to 149 */
243#define PIC_IRT_NOR_INDEX 150
244#define PIC_IRT_NAND_INDEX 151
245#define PIC_IRT_SPI_INDEX 152
246#define PIC_IRT_MMC_INDEX 153
247 194
248#define PIC_CLOCK_TIMER 7 195#define PIC_CLOCK_TIMER 7
249#define PIC_IRQ_BASE 8 196#define PIC_IRQ_BASE 8
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
deleted file mode 100644
index a9cd350dfb6c..000000000000
--- a/arch/mips/include/asm/netlogic/xlp-hal/usb.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Copyright (c) 2003-2012 Broadcom Corporation
3 * All Rights Reserved
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the Broadcom
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef __NLM_HAL_USB_H__
36#define __NLM_HAL_USB_H__
37
38#define USB_CTL_0 0x01
39#define USB_PHY_0 0x0A
40#define USB_PHY_RESET 0x01
41#define USB_PHY_PORT_RESET_0 0x10
42#define USB_PHY_PORT_RESET_1 0x20
43#define USB_CONTROLLER_RESET 0x01
44#define USB_INT_STATUS 0x0E
45#define USB_INT_EN 0x0F
46#define USB_PHY_INTERRUPT_EN 0x01
47#define USB_OHCI_INTERRUPT_EN 0x02
48#define USB_OHCI_INTERRUPT1_EN 0x04
49#define USB_OHCI_INTERRUPT2_EN 0x08
50#define USB_CTRL_INTERRUPT_EN 0x10
51
52#ifndef __ASSEMBLY__
53
54#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
55#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
56#define nlm_get_usb_pcibase(node, inst) \
57 nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
58#define nlm_get_usb_hcd_base(node, inst) \
59 nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
60#define nlm_get_usb_regbase(node, inst) \
61 (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
62
63#endif
64#endif /* __NLM_HAL_USB_H__ */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index fdc62fb5630d..8b8f6b393363 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -8,6 +8,7 @@
8#ifndef _ASM_PGTABLE_H 8#ifndef _ASM_PGTABLE_H
9#define _ASM_PGTABLE_H 9#define _ASM_PGTABLE_H
10 10
11#include <linux/mm_types.h>
11#include <linux/mmzone.h> 12#include <linux/mmzone.h>
12#ifdef CONFIG_32BIT 13#ifdef CONFIG_32BIT
13#include <asm/pgtable-32.h> 14#include <asm/pgtable-32.h>
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 2a5fa7abb346..71686c897dea 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count;
44#define SPECIAL_PAGES_SIZE PAGE_SIZE 44#define SPECIAL_PAGES_SIZE PAGE_SIZE
45 45
46#ifdef CONFIG_32BIT 46#ifdef CONFIG_32BIT
47#ifdef CONFIG_KVM_GUEST
48/* User space process size is limited to 1GB in KVM Guest Mode */
49#define TASK_SIZE 0x3fff8000UL
50#else
47/* 51/*
48 * User space process size: 2GB. This is hardcoded into a few places, 52 * User space process size: 2GB. This is hardcoded into a few places,
49 * so don't change it unless you know what you are doing. 53 * so don't change it unless you know what you are doing.
50 */ 54 */
51#define TASK_SIZE 0x7fff8000UL 55#define TASK_SIZE 0x7fff8000UL
56#endif
52 57
53#ifdef __KERNEL__ 58#ifdef __KERNEL__
54#define STACK_TOP_MAX TASK_SIZE 59#define STACK_TOP_MAX TASK_SIZE
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 8808bf548b99..1e7e0961064b 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph);
48static inline void device_tree_init(void) { } 48static inline void device_tree_init(void) { }
49#endif /* CONFIG_OF */ 49#endif /* CONFIG_OF */
50 50
51extern char *mips_get_machine_name(void);
52extern void mips_set_machine_name(const char *name);
53
51#endif /* __ASM_PROM_H */ 54#endif /* __ASM_PROM_H */
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
index 1a2c3025bf28..fdfae43d8b99 100644
--- a/arch/mips/include/asm/sn/sn_private.h
+++ b/arch/mips/include/asm/sn/sn_private.h
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
14extern void install_ipi(void); 14extern void install_ipi(void);
15extern void setup_replication_mask(void); 15extern void setup_replication_mask(void);
16extern void replicate_kernel_text(void); 16extern void replicate_kernel_text(void);
17extern pfn_t node_getfirstfree(cnodeid_t); 17extern unsigned long node_getfirstfree(cnodeid_t);
18 18
19#endif /* __ASM_SN_SN_PRIVATE_H */ 19#endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index c4813d67aec3..6d24d4e8b9ed 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -19,7 +19,6 @@ typedef signed char partid_t; /* partition ID type */
19typedef signed short moduleid_t; /* user-visible module number type */ 19typedef signed short moduleid_t; /* user-visible module number type */
20typedef signed short cmoduleid_t; /* kernel compact module id type */ 20typedef signed short cmoduleid_t; /* kernel compact module id type */
21typedef unsigned char clusterid_t; /* Clusterid of the cell */ 21typedef unsigned char clusterid_t; /* Clusterid of the cell */
22typedef unsigned long pfn_t;
23 22
24typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */ 23typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
25 24
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5130c88d6420..78d201fb6c87 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
71 " nop \n" 71 " nop \n"
72 " srl %[my_ticket], %[ticket], 16 \n" 72 " srl %[my_ticket], %[ticket], 16 \n"
73 " andi %[ticket], %[ticket], 0xffff \n" 73 " andi %[ticket], %[ticket], 0xffff \n"
74 " andi %[my_ticket], %[my_ticket], 0xffff \n"
75 " bne %[ticket], %[my_ticket], 4f \n" 74 " bne %[ticket], %[my_ticket], 4f \n"
76 " subu %[ticket], %[my_ticket], %[ticket] \n" 75 " subu %[ticket], %[my_ticket], %[ticket] \n"
77 "2: \n" 76 "2: \n"
@@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
105 " beqz %[my_ticket], 1b \n" 104 " beqz %[my_ticket], 1b \n"
106 " srl %[my_ticket], %[ticket], 16 \n" 105 " srl %[my_ticket], %[ticket], 16 \n"
107 " andi %[ticket], %[ticket], 0xffff \n" 106 " andi %[ticket], %[ticket], 0xffff \n"
108 " andi %[my_ticket], %[my_ticket], 0xffff \n"
109 " bne %[ticket], %[my_ticket], 4f \n" 107 " bne %[ticket], %[my_ticket], 4f \n"
110 " subu %[ticket], %[my_ticket], %[ticket] \n" 108 " subu %[ticket], %[my_ticket], %[ticket] \n"
111 "2: \n" 109 "2: \n"
@@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
153 " \n" 151 " \n"
154 "1: ll %[ticket], %[ticket_ptr] \n" 152 "1: ll %[ticket], %[ticket_ptr] \n"
155 " srl %[my_ticket], %[ticket], 16 \n" 153 " srl %[my_ticket], %[ticket], 16 \n"
156 " andi %[my_ticket], %[my_ticket], 0xffff \n"
157 " andi %[now_serving], %[ticket], 0xffff \n" 154 " andi %[now_serving], %[ticket], 0xffff \n"
158 " bne %[my_ticket], %[now_serving], 3f \n" 155 " bne %[my_ticket], %[now_serving], 3f \n"
159 " addu %[ticket], %[ticket], %[inc] \n" 156 " addu %[ticket], %[ticket], %[inc] \n"
@@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
178 " \n" 175 " \n"
179 "1: ll %[ticket], %[ticket_ptr] \n" 176 "1: ll %[ticket], %[ticket_ptr] \n"
180 " srl %[my_ticket], %[ticket], 16 \n" 177 " srl %[my_ticket], %[ticket], 16 \n"
181 " andi %[my_ticket], %[my_ticket], 0xffff \n"
182 " andi %[now_serving], %[ticket], 0xffff \n" 178 " andi %[now_serving], %[ticket], 0xffff \n"
183 " bne %[my_ticket], %[now_serving], 3f \n" 179 " bne %[my_ticket], %[now_serving], 3f \n"
184 " addu %[ticket], %[ticket], %[inc] \n" 180 " addu %[ticket], %[ticket], %[inc] \n"
@@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
242 : "m" (rw->lock) 238 : "m" (rw->lock)
243 : "memory"); 239 : "memory");
244 } else { 240 } else {
245 __asm__ __volatile__( 241 do {
246 " .set noreorder # arch_read_lock \n" 242 __asm__ __volatile__(
247 "1: ll %1, %2 \n" 243 "1: ll %1, %2 # arch_read_lock \n"
248 " bltz %1, 3f \n" 244 " bltz %1, 1b \n"
249 " addu %1, 1 \n" 245 " addu %1, 1 \n"
250 "2: sc %1, %0 \n" 246 "2: sc %1, %0 \n"
251 " beqz %1, 1b \n" 247 : "=m" (rw->lock), "=&r" (tmp)
252 " nop \n" 248 : "m" (rw->lock)
253 " .subsection 2 \n" 249 : "memory");
254 "3: ll %1, %2 \n" 250 } while (unlikely(!tmp));
255 " bltz %1, 3b \n"
256 " addu %1, 1 \n"
257 " b 2b \n"
258 " nop \n"
259 " .previous \n"
260 " .set reorder \n"
261 : "=m" (rw->lock), "=&r" (tmp)
262 : "m" (rw->lock)
263 : "memory");
264 } 251 }
265 252
266 smp_llsc_mb(); 253 smp_llsc_mb();
@@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
285 : "m" (rw->lock) 272 : "m" (rw->lock)
286 : "memory"); 273 : "memory");
287 } else { 274 } else {
288 __asm__ __volatile__( 275 do {
289 " .set noreorder # arch_read_unlock \n" 276 __asm__ __volatile__(
290 "1: ll %1, %2 \n" 277 "1: ll %1, %2 # arch_read_unlock \n"
291 " sub %1, 1 \n" 278 " sub %1, 1 \n"
292 " sc %1, %0 \n" 279 " sc %1, %0 \n"
293 " beqz %1, 2f \n" 280 : "=m" (rw->lock), "=&r" (tmp)
294 " nop \n" 281 : "m" (rw->lock)
295 " .subsection 2 \n" 282 : "memory");
296 "2: b 1b \n" 283 } while (unlikely(!tmp));
297 " nop \n"
298 " .previous \n"
299 " .set reorder \n"
300 : "=m" (rw->lock), "=&r" (tmp)
301 : "m" (rw->lock)
302 : "memory");
303 } 284 }
304} 285}
305 286
@@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
321 : "m" (rw->lock) 302 : "m" (rw->lock)
322 : "memory"); 303 : "memory");
323 } else { 304 } else {
324 __asm__ __volatile__( 305 do {
325 " .set noreorder # arch_write_lock \n" 306 __asm__ __volatile__(
326 "1: ll %1, %2 \n" 307 "1: ll %1, %2 # arch_write_lock \n"
327 " bnez %1, 3f \n" 308 " bnez %1, 1b \n"
328 " lui %1, 0x8000 \n" 309 " lui %1, 0x8000 \n"
329 "2: sc %1, %0 \n" 310 "2: sc %1, %0 \n"
330 " beqz %1, 3f \n" 311 : "=m" (rw->lock), "=&r" (tmp)
331 " nop \n" 312 : "m" (rw->lock)
332 " .subsection 2 \n" 313 : "memory");
333 "3: ll %1, %2 \n" 314 } while (unlikely(!tmp));
334 " bnez %1, 3b \n"
335 " lui %1, 0x8000 \n"
336 " b 2b \n"
337 " nop \n"
338 " .previous \n"
339 " .set reorder \n"
340 : "=m" (rw->lock), "=&r" (tmp)
341 : "m" (rw->lock)
342 : "memory");
343 } 315 }
344 316
345 smp_llsc_mb(); 317 smp_llsc_mb();
@@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
424 : "m" (rw->lock) 396 : "m" (rw->lock)
425 : "memory"); 397 : "memory");
426 } else { 398 } else {
427 __asm__ __volatile__( 399 do {
428 " .set noreorder # arch_write_trylock \n" 400 __asm__ __volatile__(
429 " li %2, 0 \n" 401 " ll %1, %3 # arch_write_trylock \n"
430 "1: ll %1, %3 \n" 402 " li %2, 0 \n"
431 " bnez %1, 2f \n" 403 " bnez %1, 2f \n"
432 " lui %1, 0x8000 \n" 404 " lui %1, 0x8000 \n"
433 " sc %1, %0 \n" 405 " sc %1, %0 \n"
434 " beqz %1, 3f \n" 406 " li %2, 1 \n"
435 " li %2, 1 \n" 407 "2: \n"
436 "2: \n" 408 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
437 __WEAK_LLSC_MB 409 : "m" (rw->lock)
438 " .subsection 2 \n" 410 : "memory");
439 "3: b 1b \n" 411 } while (unlikely(!tmp));
440 " li %2, 0 \n" 412
441 " .previous \n" 413 smp_llsc_mb();
442 " .set reorder \n"
443 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
444 : "m" (rw->lock)
445 : "memory");
446 } 414 }
447 415
448 return ret; 416 return ret;
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index c99384018161..a89d1b10d027 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -139,7 +139,7 @@
1391: move ra, k0 1391: move ra, k0
140 li k0, 3 140 li k0, 3
141 mtc0 k0, $22 141 mtc0 k0, $22
142#endif /* CONFIG_CPU_LOONGSON2F */ 142#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
143#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) 143#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
144 lui k1, %hi(kernelsp) 144 lui k1, %hi(kernelsp)
145#else 145#else
@@ -189,6 +189,7 @@
189 LONG_S $0, PT_R0(sp) 189 LONG_S $0, PT_R0(sp)
190 mfc0 v1, CP0_STATUS 190 mfc0 v1, CP0_STATUS
191 LONG_S $2, PT_R2(sp) 191 LONG_S $2, PT_R2(sp)
192 LONG_S v1, PT_STATUS(sp)
192#ifdef CONFIG_MIPS_MT_SMTC 193#ifdef CONFIG_MIPS_MT_SMTC
193 /* 194 /*
194 * Ideally, these instructions would be shuffled in 195 * Ideally, these instructions would be shuffled in
@@ -200,21 +201,20 @@
200 LONG_S k0, PT_TCSTATUS(sp) 201 LONG_S k0, PT_TCSTATUS(sp)
201#endif /* CONFIG_MIPS_MT_SMTC */ 202#endif /* CONFIG_MIPS_MT_SMTC */
202 LONG_S $4, PT_R4(sp) 203 LONG_S $4, PT_R4(sp)
203 LONG_S $5, PT_R5(sp)
204 LONG_S v1, PT_STATUS(sp)
205 mfc0 v1, CP0_CAUSE 204 mfc0 v1, CP0_CAUSE
206 LONG_S $6, PT_R6(sp) 205 LONG_S $5, PT_R5(sp)
207 LONG_S $7, PT_R7(sp)
208 LONG_S v1, PT_CAUSE(sp) 206 LONG_S v1, PT_CAUSE(sp)
207 LONG_S $6, PT_R6(sp)
209 MFC0 v1, CP0_EPC 208 MFC0 v1, CP0_EPC
209 LONG_S $7, PT_R7(sp)
210#ifdef CONFIG_64BIT 210#ifdef CONFIG_64BIT
211 LONG_S $8, PT_R8(sp) 211 LONG_S $8, PT_R8(sp)
212 LONG_S $9, PT_R9(sp) 212 LONG_S $9, PT_R9(sp)
213#endif 213#endif
214 LONG_S v1, PT_EPC(sp)
214 LONG_S $25, PT_R25(sp) 215 LONG_S $25, PT_R25(sp)
215 LONG_S $28, PT_R28(sp) 216 LONG_S $28, PT_R28(sp)
216 LONG_S $31, PT_R31(sp) 217 LONG_S $31, PT_R31(sp)
217 LONG_S v1, PT_EPC(sp)
218 ori $28, sp, _THREAD_MASK 218 ori $28, sp, _THREAD_MASK
219 xori $28, _THREAD_MASK 219 xori $28, _THREAD_MASK
220#ifdef CONFIG_CPU_CAVIUM_OCTEON 220#ifdef CONFIG_CPU_CAVIUM_OCTEON
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 178f7924149a..895320e25662 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -58,8 +58,12 @@ struct thread_info {
58#define init_stack (init_thread_union.stack) 58#define init_stack (init_thread_union.stack)
59 59
60/* How to get the thread information struct from C. */ 60/* How to get the thread information struct from C. */
61register struct thread_info *__current_thread_info __asm__("$28"); 61static inline struct thread_info *current_thread_info(void)
62#define current_thread_info() __current_thread_info 62{
63 register struct thread_info *__current_thread_info __asm__("$28");
64
65 return __current_thread_info;
66}
63 67
64#endif /* !__ASSEMBLY__ */ 68#endif /* !__ASSEMBLY__ */
65 69
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index debc8009bd58..2d7b9df4542d 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -52,13 +52,15 @@ extern int (*perf_irq)(void);
52 */ 52 */
53extern unsigned int __weak get_c0_compare_int(void); 53extern unsigned int __weak get_c0_compare_int(void);
54extern int r4k_clockevent_init(void); 54extern int r4k_clockevent_init(void);
55extern int smtc_clockevent_init(void);
56extern int gic_clockevent_init(void);
55 57
56static inline int mips_clockevent_init(void) 58static inline int mips_clockevent_init(void)
57{ 59{
58#ifdef CONFIG_MIPS_MT_SMTC 60#ifdef CONFIG_MIPS_MT_SMTC
59 extern int smtc_clockevent_init(void);
60
61 return smtc_clockevent_init(); 61 return smtc_clockevent_init();
62#elif defined(CONFIG_CEVT_GIC)
63 return (gic_clockevent_init() | r4k_clockevent_init());
62#elif defined(CONFIG_CEVT_R4K) 64#elif defined(CONFIG_CEVT_R4K)
63 return r4k_clockevent_init(); 65 return r4k_clockevent_init();
64#else 66#else
@@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void)
69/* 71/*
70 * Initialize the count register as a clocksource 72 * Initialize the count register as a clocksource
71 */ 73 */
72#ifdef CONFIG_CSRC_R4K
73extern int init_r4k_clocksource(void); 74extern int init_r4k_clocksource(void);
74#endif
75 75
76static inline int init_mips_clocksource(void) 76static inline int init_mips_clocksource(void)
77{ 77{
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bd87e36bf26a..f3fa3750f577 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -23,7 +23,11 @@
23 */ 23 */
24#ifdef CONFIG_32BIT 24#ifdef CONFIG_32BIT
25 25
26#define __UA_LIMIT 0x80000000UL 26#ifdef CONFIG_KVM_GUEST
27#define __UA_LIMIT 0x40000000UL
28#else
29#define __UA_LIMIT 0x80000000UL
30#endif
27 31
28#define __UA_ADDR ".word" 32#define __UA_ADDR ".word"
29#define __UA_LA "la" 33#define __UA_LA "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
55 * address in this range it's the process's problem, not ours :-) 59 * address in this range it's the process's problem, not ours :-)
56 */ 60 */
57 61
62#ifdef CONFIG_KVM_GUEST
63#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
64#define USER_DS ((mm_segment_t) { 0xC0000000UL })
65#else
58#define KERNEL_DS ((mm_segment_t) { 0UL }) 66#define KERNEL_DS ((mm_segment_t) { 0UL })
59#define USER_DS ((mm_segment_t) { __UA_LIMIT }) 67#define USER_DS ((mm_segment_t) { __UA_LIMIT })
68#endif
60 69
61#define VERIFY_READ 0 70#define VERIFY_READ 0
62#define VERIFY_WRITE 1 71#define VERIFY_WRITE 1
@@ -261,6 +270,7 @@ do { \
261 __asm__ __volatile__( \ 270 __asm__ __volatile__( \
262 "1: " insn " %1, %3 \n" \ 271 "1: " insn " %1, %3 \n" \
263 "2: \n" \ 272 "2: \n" \
273 " .insn \n" \
264 " .section .fixup,\"ax\" \n" \ 274 " .section .fixup,\"ax\" \n" \
265 "3: li %0, %4 \n" \ 275 "3: li %0, %4 \n" \
266 " j 2b \n" \ 276 " j 2b \n" \
@@ -287,7 +297,9 @@ do { \
287 __asm__ __volatile__( \ 297 __asm__ __volatile__( \
288 "1: lw %1, (%3) \n" \ 298 "1: lw %1, (%3) \n" \
289 "2: lw %D1, 4(%3) \n" \ 299 "2: lw %D1, 4(%3) \n" \
290 "3: .section .fixup,\"ax\" \n" \ 300 "3: \n" \
301 " .insn \n" \
302 " .section .fixup,\"ax\" \n" \
291 "4: li %0, %4 \n" \ 303 "4: li %0, %4 \n" \
292 " move %1, $0 \n" \ 304 " move %1, $0 \n" \
293 " move %D1, $0 \n" \ 305 " move %D1, $0 \n" \
@@ -355,6 +367,7 @@ do { \
355 __asm__ __volatile__( \ 367 __asm__ __volatile__( \
356 "1: " insn " %z2, %3 # __put_user_asm\n" \ 368 "1: " insn " %z2, %3 # __put_user_asm\n" \
357 "2: \n" \ 369 "2: \n" \
370 " .insn \n" \
358 " .section .fixup,\"ax\" \n" \ 371 " .section .fixup,\"ax\" \n" \
359 "3: li %0, %4 \n" \ 372 "3: li %0, %4 \n" \
360 " j 2b \n" \ 373 " j 2b \n" \
@@ -373,6 +386,7 @@ do { \
373 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ 386 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \
374 "2: sw %D2, 4(%3) \n" \ 387 "2: sw %D2, 4(%3) \n" \
375 "3: \n" \ 388 "3: \n" \
389 " .insn \n" \
376 " .section .fixup,\"ax\" \n" \ 390 " .section .fixup,\"ax\" \n" \
377 "4: li %0, %4 \n" \ 391 "4: li %0, %4 \n" \
378 " j 3b \n" \ 392 " j 3b \n" \
@@ -524,6 +538,7 @@ do { \
524 __asm__ __volatile__( \ 538 __asm__ __volatile__( \
525 "1: " insn " %1, %3 \n" \ 539 "1: " insn " %1, %3 \n" \
526 "2: \n" \ 540 "2: \n" \
541 " .insn \n" \
527 " .section .fixup,\"ax\" \n" \ 542 " .section .fixup,\"ax\" \n" \
528 "3: li %0, %4 \n" \ 543 "3: li %0, %4 \n" \
529 " j 2b \n" \ 544 " j 2b \n" \
@@ -549,7 +564,9 @@ do { \
549 "1: ulw %1, (%3) \n" \ 564 "1: ulw %1, (%3) \n" \
550 "2: ulw %D1, 4(%3) \n" \ 565 "2: ulw %D1, 4(%3) \n" \
551 " move %0, $0 \n" \ 566 " move %0, $0 \n" \
552 "3: .section .fixup,\"ax\" \n" \ 567 "3: \n" \
568 " .insn \n" \
569 " .section .fixup,\"ax\" \n" \
553 "4: li %0, %4 \n" \ 570 "4: li %0, %4 \n" \
554 " move %1, $0 \n" \ 571 " move %1, $0 \n" \
555 " move %D1, $0 \n" \ 572 " move %D1, $0 \n" \
@@ -616,6 +633,7 @@ do { \
616 __asm__ __volatile__( \ 633 __asm__ __volatile__( \
617 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ 634 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
618 "2: \n" \ 635 "2: \n" \
636 " .insn \n" \
619 " .section .fixup,\"ax\" \n" \ 637 " .section .fixup,\"ax\" \n" \
620 "3: li %0, %4 \n" \ 638 "3: li %0, %4 \n" \
621 " j 2b \n" \ 639 " j 2b \n" \
@@ -634,6 +652,7 @@ do { \
634 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ 652 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
635 "2: sw %D2, 4(%3) \n" \ 653 "2: sw %D2, 4(%3) \n" \
636 "3: \n" \ 654 "3: \n" \
655 " .insn \n" \
637 " .section .fixup,\"ax\" \n" \ 656 " .section .fixup,\"ax\" \n" \
638 "4: li %0, %4 \n" \ 657 "4: li %0, %4 \n" \
639 " j 3b \n" \ 658 " j 3b \n" \
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 058e941626a6..370d967725c2 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -6,7 +6,7 @@
6 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 6 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
7 * Copyright (C) 2005 Maciej W. Rozycki 7 * Copyright (C) 2005 Maciej W. Rozycki
8 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 8 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
9 * Copyright (C) 2012 MIPS Technologies, Inc. 9 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
10 */ 10 */
11 11
12#include <linux/types.h> 12#include <linux/types.h>
@@ -22,44 +22,75 @@
22#define UASM_EXPORT_SYMBOL(sym) 22#define UASM_EXPORT_SYMBOL(sym)
23#endif 23#endif
24 24
25#define _UASM_ISA_CLASSIC 0
26#define _UASM_ISA_MICROMIPS 1
27
28#ifndef UASM_ISA
29#ifdef CONFIG_CPU_MICROMIPS
30#define UASM_ISA _UASM_ISA_MICROMIPS
31#else
32#define UASM_ISA _UASM_ISA_CLASSIC
33#endif
34#endif
35
36#if (UASM_ISA == _UASM_ISA_CLASSIC)
37#ifdef CONFIG_CPU_MICROMIPS
38#define ISAOPC(op) CL_uasm_i##op
39#define ISAFUNC(x) CL_##x
40#else
41#define ISAOPC(op) uasm_i##op
42#define ISAFUNC(x) x
43#endif
44#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
45#ifdef CONFIG_CPU_MICROMIPS
46#define ISAOPC(op) uasm_i##op
47#define ISAFUNC(x) x
48#else
49#define ISAOPC(op) MM_uasm_i##op
50#define ISAFUNC(x) MM_##x
51#endif
52#else
53#error Unsupported micro-assembler ISA!!!
54#endif
55
25#define Ip_u1u2u3(op) \ 56#define Ip_u1u2u3(op) \
26void __uasminit \ 57void __uasminit \
27uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 58ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
28 59
29#define Ip_u2u1u3(op) \ 60#define Ip_u2u1u3(op) \
30void __uasminit \ 61void __uasminit \
31uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 62ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
32 63
33#define Ip_u3u1u2(op) \ 64#define Ip_u3u1u2(op) \
34void __uasminit \ 65void __uasminit \
35uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) 66ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
36 67
37#define Ip_u1u2s3(op) \ 68#define Ip_u1u2s3(op) \
38void __uasminit \ 69void __uasminit \
39uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) 70ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
40 71
41#define Ip_u2s3u1(op) \ 72#define Ip_u2s3u1(op) \
42void __uasminit \ 73void __uasminit \
43uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) 74ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
44 75
45#define Ip_u2u1s3(op) \ 76#define Ip_u2u1s3(op) \
46void __uasminit \ 77void __uasminit \
47uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) 78ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
48 79
49#define Ip_u2u1msbu3(op) \ 80#define Ip_u2u1msbu3(op) \
50void __uasminit \ 81void __uasminit \
51uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ 82ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
52 unsigned int d) 83 unsigned int d)
53 84
54#define Ip_u1u2(op) \ 85#define Ip_u1u2(op) \
55void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b) 86void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
56 87
57#define Ip_u1s2(op) \ 88#define Ip_u1s2(op) \
58void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b) 89void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
59 90
60#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a) 91#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
61 92
62#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf) 93#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
63 94
64Ip_u2u1s3(_addiu); 95Ip_u2u1s3(_addiu);
65Ip_u3u1u2(_addu); 96Ip_u3u1u2(_addu);
@@ -132,19 +163,20 @@ struct uasm_label {
132 int lab; 163 int lab;
133}; 164};
134 165
135void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid); 166void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
167 int lid);
136#ifdef CONFIG_64BIT 168#ifdef CONFIG_64BIT
137int uasm_in_compat_space_p(long addr); 169int ISAFUNC(uasm_in_compat_space_p)(long addr);
138#endif 170#endif
139int uasm_rel_hi(long val); 171int ISAFUNC(uasm_rel_hi)(long val);
140int uasm_rel_lo(long val); 172int ISAFUNC(uasm_rel_lo)(long val);
141void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr); 173void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
142void UASM_i_LA(u32 **buf, unsigned int rs, long addr); 174void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
143 175
144#define UASM_L_LA(lb) \ 176#define UASM_L_LA(lb) \
145static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ 177static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
146{ \ 178{ \
147 uasm_build_label(lab, addr, label##lb); \ 179 ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
148} 180}
149 181
150/* convenience macros for instructions */ 182/* convenience macros for instructions */
@@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
196 unsigned int a2, unsigned int a3) 228 unsigned int a2, unsigned int a3)
197{ 229{
198 if (a3 < 32) 230 if (a3 < 32)
199 uasm_i_drotr(p, a1, a2, a3); 231 ISAOPC(_drotr)(p, a1, a2, a3);
200 else 232 else
201 uasm_i_drotr32(p, a1, a2, a3 - 32); 233 ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
202} 234}
203 235
204static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1, 236static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
205 unsigned int a2, unsigned int a3) 237 unsigned int a2, unsigned int a3)
206{ 238{
207 if (a3 < 32) 239 if (a3 < 32)
208 uasm_i_dsll(p, a1, a2, a3); 240 ISAOPC(_dsll)(p, a1, a2, a3);
209 else 241 else
210 uasm_i_dsll32(p, a1, a2, a3 - 32); 242 ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
211} 243}
212 244
213static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1, 245static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
214 unsigned int a2, unsigned int a3) 246 unsigned int a2, unsigned int a3)
215{ 247{
216 if (a3 < 32) 248 if (a3 < 32)
217 uasm_i_dsrl(p, a1, a2, a3); 249 ISAOPC(_dsrl)(p, a1, a2, a3);
218 else 250 else
219 uasm_i_dsrl32(p, a1, a2, a3 - 32); 251 ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
220} 252}
221 253
222/* Handle relocations. */ 254/* Handle relocations. */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4d078815eaa5..0f4aec2ad1e6 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright (C) 1996, 2000 by Ralf Baechle 8 * Copyright (C) 1996, 2000 by Ralf Baechle
9 * Copyright (C) 2006 by Thiemo Seufer 9 * Copyright (C) 2006 by Thiemo Seufer
10 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10 */ 11 */
11#ifndef _UAPI_ASM_INST_H 12#ifndef _UAPI_ASM_INST_H
12#define _UAPI_ASM_INST_H 13#define _UAPI_ASM_INST_H
@@ -193,6 +194,282 @@ enum lx_func {
193}; 194};
194 195
195/* 196/*
197 * (microMIPS) Major opcodes.
198 */
199enum mm_major_op {
200 mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
201 mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
202 mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
203 mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
204 mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
205 mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
206 mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
207 mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
208 mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
209 mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
210 mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
211 mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
212 mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
213 mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
214 mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
215 mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
216};
217
218/*
219 * (microMIPS) POOL32I minor opcodes.
220 */
221enum mm_32i_minor_op {
222 mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
223 mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
224 mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
225 mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
226 mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
227 mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
228 mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
229 mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
230 mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
231};
232
233/*
234 * (microMIPS) POOL32A minor opcodes.
235 */
236enum mm_32a_minor_op {
237 mm_sll32_op = 0x000,
238 mm_ins_op = 0x00c,
239 mm_ext_op = 0x02c,
240 mm_pool32axf_op = 0x03c,
241 mm_srl32_op = 0x040,
242 mm_sra_op = 0x080,
243 mm_rotr_op = 0x0c0,
244 mm_lwxs_op = 0x118,
245 mm_addu32_op = 0x150,
246 mm_subu32_op = 0x1d0,
247 mm_and_op = 0x250,
248 mm_or32_op = 0x290,
249 mm_xor32_op = 0x310,
250};
251
252/*
253 * (microMIPS) POOL32B functions.
254 */
255enum mm_32b_func {
256 mm_lwc2_func = 0x0,
257 mm_lwp_func = 0x1,
258 mm_ldc2_func = 0x2,
259 mm_ldp_func = 0x4,
260 mm_lwm32_func = 0x5,
261 mm_cache_func = 0x6,
262 mm_ldm_func = 0x7,
263 mm_swc2_func = 0x8,
264 mm_swp_func = 0x9,
265 mm_sdc2_func = 0xa,
266 mm_sdp_func = 0xc,
267 mm_swm32_func = 0xd,
268 mm_sdm_func = 0xf,
269};
270
271/*
272 * (microMIPS) POOL32C functions.
273 */
274enum mm_32c_func {
275 mm_pref_func = 0x2,
276 mm_ll_func = 0x3,
277 mm_swr_func = 0x9,
278 mm_sc_func = 0xb,
279 mm_lwu_func = 0xe,
280};
281
282/*
283 * (microMIPS) POOL32AXF minor opcodes.
284 */
285enum mm_32axf_minor_op {
286 mm_mfc0_op = 0x003,
287 mm_mtc0_op = 0x00b,
288 mm_tlbp_op = 0x00d,
289 mm_jalr_op = 0x03c,
290 mm_tlbr_op = 0x04d,
291 mm_jalrhb_op = 0x07c,
292 mm_tlbwi_op = 0x08d,
293 mm_tlbwr_op = 0x0cd,
294 mm_jalrs_op = 0x13c,
295 mm_jalrshb_op = 0x17c,
296 mm_syscall_op = 0x22d,
297 mm_eret_op = 0x3cd,
298};
299
300/*
301 * (microMIPS) POOL32F minor opcodes.
302 */
303enum mm_32f_minor_op {
304 mm_32f_00_op = 0x00,
305 mm_32f_01_op = 0x01,
306 mm_32f_02_op = 0x02,
307 mm_32f_10_op = 0x08,
308 mm_32f_11_op = 0x09,
309 mm_32f_12_op = 0x0a,
310 mm_32f_20_op = 0x10,
311 mm_32f_30_op = 0x18,
312 mm_32f_40_op = 0x20,
313 mm_32f_41_op = 0x21,
314 mm_32f_42_op = 0x22,
315 mm_32f_50_op = 0x28,
316 mm_32f_51_op = 0x29,
317 mm_32f_52_op = 0x2a,
318 mm_32f_60_op = 0x30,
319 mm_32f_70_op = 0x38,
320 mm_32f_73_op = 0x3b,
321 mm_32f_74_op = 0x3c,
322};
323
324/*
325 * (microMIPS) POOL32F secondary minor opcodes.
326 */
327enum mm_32f_10_minor_op {
328 mm_lwxc1_op = 0x1,
329 mm_swxc1_op,
330 mm_ldxc1_op,
331 mm_sdxc1_op,
332 mm_luxc1_op,
333 mm_suxc1_op,
334};
335
336enum mm_32f_func {
337 mm_lwxc1_func = 0x048,
338 mm_swxc1_func = 0x088,
339 mm_ldxc1_func = 0x0c8,
340 mm_sdxc1_func = 0x108,
341};
342
343/*
344 * (microMIPS) POOL32F secondary minor opcodes.
345 */
346enum mm_32f_40_minor_op {
347 mm_fmovf_op,
348 mm_fmovt_op,
349};
350
351/*
352 * (microMIPS) POOL32F secondary minor opcodes.
353 */
354enum mm_32f_60_minor_op {
355 mm_fadd_op,
356 mm_fsub_op,
357 mm_fmul_op,
358 mm_fdiv_op,
359};
360
361/*
362 * (microMIPS) POOL32F secondary minor opcodes.
363 */
364enum mm_32f_70_minor_op {
365 mm_fmovn_op,
366 mm_fmovz_op,
367};
368
369/*
370 * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
371 */
372enum mm_32f_73_minor_op {
373 mm_fmov0_op = 0x01,
374 mm_fcvtl_op = 0x04,
375 mm_movf0_op = 0x05,
376 mm_frsqrt_op = 0x08,
377 mm_ffloorl_op = 0x0c,
378 mm_fabs0_op = 0x0d,
379 mm_fcvtw_op = 0x24,
380 mm_movt0_op = 0x25,
381 mm_fsqrt_op = 0x28,
382 mm_ffloorw_op = 0x2c,
383 mm_fneg0_op = 0x2d,
384 mm_cfc1_op = 0x40,
385 mm_frecip_op = 0x48,
386 mm_fceill_op = 0x4c,
387 mm_fcvtd0_op = 0x4d,
388 mm_ctc1_op = 0x60,
389 mm_fceilw_op = 0x6c,
390 mm_fcvts0_op = 0x6d,
391 mm_mfc1_op = 0x80,
392 mm_fmov1_op = 0x81,
393 mm_movf1_op = 0x85,
394 mm_ftruncl_op = 0x8c,
395 mm_fabs1_op = 0x8d,
396 mm_mtc1_op = 0xa0,
397 mm_movt1_op = 0xa5,
398 mm_ftruncw_op = 0xac,
399 mm_fneg1_op = 0xad,
400 mm_froundl_op = 0xcc,
401 mm_fcvtd1_op = 0xcd,
402 mm_froundw_op = 0xec,
403 mm_fcvts1_op = 0xed,
404};
405
406/*
407 * (microMIPS) POOL16C minor opcodes.
408 */
409enum mm_16c_minor_op {
410 mm_lwm16_op = 0x04,
411 mm_swm16_op = 0x05,
412 mm_jr16_op = 0x18,
413 mm_jrc_op = 0x1a,
414 mm_jalr16_op = 0x1c,
415 mm_jalrs16_op = 0x1e,
416};
417
418/*
419 * (microMIPS) POOL16D minor opcodes.
420 */
421enum mm_16d_minor_op {
422 mm_addius5_func,
423 mm_addiusp_func,
424};
425
426/*
427 * (MIPS16e) opcodes.
428 */
429enum MIPS16e_ops {
430 MIPS16e_jal_op = 003,
431 MIPS16e_ld_op = 007,
432 MIPS16e_i8_op = 014,
433 MIPS16e_sd_op = 017,
434 MIPS16e_lb_op = 020,
435 MIPS16e_lh_op = 021,
436 MIPS16e_lwsp_op = 022,
437 MIPS16e_lw_op = 023,
438 MIPS16e_lbu_op = 024,
439 MIPS16e_lhu_op = 025,
440 MIPS16e_lwpc_op = 026,
441 MIPS16e_lwu_op = 027,
442 MIPS16e_sb_op = 030,
443 MIPS16e_sh_op = 031,
444 MIPS16e_swsp_op = 032,
445 MIPS16e_sw_op = 033,
446 MIPS16e_rr_op = 035,
447 MIPS16e_extend_op = 036,
448 MIPS16e_i64_op = 037,
449};
450
451enum MIPS16e_i64_func {
452 MIPS16e_ldsp_func,
453 MIPS16e_sdsp_func,
454 MIPS16e_sdrasp_func,
455 MIPS16e_dadjsp_func,
456 MIPS16e_ldpc_func,
457};
458
459enum MIPS16e_rr_func {
460 MIPS16e_jr_func,
461};
462
463enum MIPS6e_i8_func {
464 MIPS16e_swrasp_func = 02,
465};
466
467/*
468 * (microMIPS & MIPS16e) NOP instruction.
469 */
470#define MM_NOP16 0x0c00
471
472/*
196 * Damn ... bitfields depend from byteorder :-( 473 * Damn ... bitfields depend from byteorder :-(
197 */ 474 */
198#ifdef __MIPSEB__ 475#ifdef __MIPSEB__
@@ -311,6 +588,262 @@ struct v_format { /* MDMX vector format */
311 ;))))))) 588 ;)))))))
312}; 589};
313 590
591/*
592 * microMIPS instruction formats (32-bit length)
593 *
594 * NOTE:
595 * Parenthesis denote whether the format is a microMIPS instruction or
596 * if it is MIPS32 instruction re-encoded for use in the microMIPS ASE.
597 */
598struct fb_format { /* FPU branch format (MIPS32) */
599 BITFIELD_FIELD(unsigned int opcode : 6,
600 BITFIELD_FIELD(unsigned int bc : 5,
601 BITFIELD_FIELD(unsigned int cc : 3,
602 BITFIELD_FIELD(unsigned int flag : 2,
603 BITFIELD_FIELD(signed int simmediate : 16,
604 ;)))))
605};
606
607struct fp0_format { /* FPU multiply and add format (MIPS32) */
608 BITFIELD_FIELD(unsigned int opcode : 6,
609 BITFIELD_FIELD(unsigned int fmt : 5,
610 BITFIELD_FIELD(unsigned int ft : 5,
611 BITFIELD_FIELD(unsigned int fs : 5,
612 BITFIELD_FIELD(unsigned int fd : 5,
613 BITFIELD_FIELD(unsigned int func : 6,
614 ;))))))
615};
616
617struct mm_fp0_format { /* FPU multipy and add format (microMIPS) */
618 BITFIELD_FIELD(unsigned int opcode : 6,
619 BITFIELD_FIELD(unsigned int ft : 5,
620 BITFIELD_FIELD(unsigned int fs : 5,
621 BITFIELD_FIELD(unsigned int fd : 5,
622 BITFIELD_FIELD(unsigned int fmt : 3,
623 BITFIELD_FIELD(unsigned int op : 2,
624 BITFIELD_FIELD(unsigned int func : 6,
625 ;)))))))
626};
627
628struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */
629 BITFIELD_FIELD(unsigned int opcode : 6,
630 BITFIELD_FIELD(unsigned int op : 5,
631 BITFIELD_FIELD(unsigned int rt : 5,
632 BITFIELD_FIELD(unsigned int fs : 5,
633 BITFIELD_FIELD(unsigned int fd : 5,
634 BITFIELD_FIELD(unsigned int func : 6,
635 ;))))))
636};
637
638struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */
639 BITFIELD_FIELD(unsigned int opcode : 6,
640 BITFIELD_FIELD(unsigned int rt : 5,
641 BITFIELD_FIELD(unsigned int fs : 5,
642 BITFIELD_FIELD(unsigned int fmt : 2,
643 BITFIELD_FIELD(unsigned int op : 8,
644 BITFIELD_FIELD(unsigned int func : 6,
645 ;))))))
646};
647
648struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */
649 BITFIELD_FIELD(unsigned int opcode : 6,
650 BITFIELD_FIELD(unsigned int fd : 5,
651 BITFIELD_FIELD(unsigned int fs : 5,
652 BITFIELD_FIELD(unsigned int cc : 3,
653 BITFIELD_FIELD(unsigned int zero : 2,
654 BITFIELD_FIELD(unsigned int fmt : 2,
655 BITFIELD_FIELD(unsigned int op : 3,
656 BITFIELD_FIELD(unsigned int func : 6,
657 ;))))))))
658};
659
660struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */
661 BITFIELD_FIELD(unsigned int opcode : 6,
662 BITFIELD_FIELD(unsigned int rt : 5,
663 BITFIELD_FIELD(unsigned int fs : 5,
664 BITFIELD_FIELD(unsigned int fmt : 3,
665 BITFIELD_FIELD(unsigned int op : 7,
666 BITFIELD_FIELD(unsigned int func : 6,
667 ;))))))
668};
669
670struct mm_fp4_format { /* FPU c.cond format (microMIPS) */
671 BITFIELD_FIELD(unsigned int opcode : 6,
672 BITFIELD_FIELD(unsigned int rt : 5,
673 BITFIELD_FIELD(unsigned int fs : 5,
674 BITFIELD_FIELD(unsigned int cc : 3,
675 BITFIELD_FIELD(unsigned int fmt : 3,
676 BITFIELD_FIELD(unsigned int cond : 4,
677 BITFIELD_FIELD(unsigned int func : 6,
678 ;)))))))
679};
680
681struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */
682 BITFIELD_FIELD(unsigned int opcode : 6,
683 BITFIELD_FIELD(unsigned int index : 5,
684 BITFIELD_FIELD(unsigned int base : 5,
685 BITFIELD_FIELD(unsigned int fd : 5,
686 BITFIELD_FIELD(unsigned int op : 5,
687 BITFIELD_FIELD(unsigned int func : 6,
688 ;))))))
689};
690
691struct fp6_format { /* FPU madd and msub format (MIPS IV) */
692 BITFIELD_FIELD(unsigned int opcode : 6,
693 BITFIELD_FIELD(unsigned int fr : 5,
694 BITFIELD_FIELD(unsigned int ft : 5,
695 BITFIELD_FIELD(unsigned int fs : 5,
696 BITFIELD_FIELD(unsigned int fd : 5,
697 BITFIELD_FIELD(unsigned int func : 6,
698 ;))))))
699};
700
701struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */
702 BITFIELD_FIELD(unsigned int opcode : 6,
703 BITFIELD_FIELD(unsigned int ft : 5,
704 BITFIELD_FIELD(unsigned int fs : 5,
705 BITFIELD_FIELD(unsigned int fd : 5,
706 BITFIELD_FIELD(unsigned int fr : 5,
707 BITFIELD_FIELD(unsigned int func : 6,
708 ;))))))
709};
710
711struct mm_i_format { /* Immediate format (microMIPS) */
712 BITFIELD_FIELD(unsigned int opcode : 6,
713 BITFIELD_FIELD(unsigned int rt : 5,
714 BITFIELD_FIELD(unsigned int rs : 5,
715 BITFIELD_FIELD(signed int simmediate : 16,
716 ;))))
717};
718
719struct mm_m_format { /* Multi-word load/store format (microMIPS) */
720 BITFIELD_FIELD(unsigned int opcode : 6,
721 BITFIELD_FIELD(unsigned int rd : 5,
722 BITFIELD_FIELD(unsigned int base : 5,
723 BITFIELD_FIELD(unsigned int func : 4,
724 BITFIELD_FIELD(signed int simmediate : 12,
725 ;)))))
726};
727
728struct mm_x_format { /* Scaled indexed load format (microMIPS) */
729 BITFIELD_FIELD(unsigned int opcode : 6,
730 BITFIELD_FIELD(unsigned int index : 5,
731 BITFIELD_FIELD(unsigned int base : 5,
732 BITFIELD_FIELD(unsigned int rd : 5,
733 BITFIELD_FIELD(unsigned int func : 11,
734 ;)))))
735};
736
737/*
738 * microMIPS instruction formats (16-bit length)
739 */
740struct mm_b0_format { /* Unconditional branch format (microMIPS) */
741 BITFIELD_FIELD(unsigned int opcode : 6,
742 BITFIELD_FIELD(signed int simmediate : 10,
743 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
744 ;)))
745};
746
747struct mm_b1_format { /* Conditional branch format (microMIPS) */
748 BITFIELD_FIELD(unsigned int opcode : 6,
749 BITFIELD_FIELD(unsigned int rs : 3,
750 BITFIELD_FIELD(signed int simmediate : 7,
751 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
752 ;))))
753};
754
755struct mm16_m_format { /* Multi-word load/store format */
756 BITFIELD_FIELD(unsigned int opcode : 6,
757 BITFIELD_FIELD(unsigned int func : 4,
758 BITFIELD_FIELD(unsigned int rlist : 2,
759 BITFIELD_FIELD(unsigned int imm : 4,
760 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
761 ;)))))
762};
763
764struct mm16_rb_format { /* Signed immediate format */
765 BITFIELD_FIELD(unsigned int opcode : 6,
766 BITFIELD_FIELD(unsigned int rt : 3,
767 BITFIELD_FIELD(unsigned int base : 3,
768 BITFIELD_FIELD(signed int simmediate : 4,
769 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
770 ;)))))
771};
772
773struct mm16_r3_format { /* Load from global pointer format */
774 BITFIELD_FIELD(unsigned int opcode : 6,
775 BITFIELD_FIELD(unsigned int rt : 3,
776 BITFIELD_FIELD(signed int simmediate : 7,
777 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
778 ;))))
779};
780
781struct mm16_r5_format { /* Load/store from stack pointer format */
782 BITFIELD_FIELD(unsigned int opcode : 6,
783 BITFIELD_FIELD(unsigned int rt : 5,
784 BITFIELD_FIELD(signed int simmediate : 5,
785 BITFIELD_FIELD(unsigned int : 16, /* Ignored */
786 ;))))
787};
788
789/*
790 * MIPS16e instruction formats (16-bit length)
791 */
792struct m16e_rr {
793 BITFIELD_FIELD(unsigned int opcode : 5,
794 BITFIELD_FIELD(unsigned int rx : 3,
795 BITFIELD_FIELD(unsigned int nd : 1,
796 BITFIELD_FIELD(unsigned int l : 1,
797 BITFIELD_FIELD(unsigned int ra : 1,
798 BITFIELD_FIELD(unsigned int func : 5,
799 ;))))))
800};
801
802struct m16e_jal {
803 BITFIELD_FIELD(unsigned int opcode : 5,
804 BITFIELD_FIELD(unsigned int x : 1,
805 BITFIELD_FIELD(unsigned int imm20_16 : 5,
806 BITFIELD_FIELD(signed int imm25_21 : 5,
807 ;))))
808};
809
810struct m16e_i64 {
811 BITFIELD_FIELD(unsigned int opcode : 5,
812 BITFIELD_FIELD(unsigned int func : 3,
813 BITFIELD_FIELD(unsigned int imm : 8,
814 ;)))
815};
816
817struct m16e_ri64 {
818 BITFIELD_FIELD(unsigned int opcode : 5,
819 BITFIELD_FIELD(unsigned int func : 3,
820 BITFIELD_FIELD(unsigned int ry : 3,
821 BITFIELD_FIELD(unsigned int imm : 5,
822 ;))))
823};
824
825struct m16e_ri {
826 BITFIELD_FIELD(unsigned int opcode : 5,
827 BITFIELD_FIELD(unsigned int rx : 3,
828 BITFIELD_FIELD(unsigned int imm : 8,
829 ;)))
830};
831
832struct m16e_rri {
833 BITFIELD_FIELD(unsigned int opcode : 5,
834 BITFIELD_FIELD(unsigned int rx : 3,
835 BITFIELD_FIELD(unsigned int ry : 3,
836 BITFIELD_FIELD(unsigned int imm : 5,
837 ;))))
838};
839
840struct m16e_i8 {
841 BITFIELD_FIELD(unsigned int opcode : 5,
842 BITFIELD_FIELD(unsigned int func : 3,
843 BITFIELD_FIELD(unsigned int imm : 8,
844 ;)))
845};
846
314union mips_instruction { 847union mips_instruction {
315 unsigned int word; 848 unsigned int word;
316 unsigned short halfword[2]; 849 unsigned short halfword[2];
@@ -326,6 +859,37 @@ union mips_instruction {
326 struct b_format b_format; 859 struct b_format b_format;
327 struct ps_format ps_format; 860 struct ps_format ps_format;
328 struct v_format v_format; 861 struct v_format v_format;
862 struct fb_format fb_format;
863 struct fp0_format fp0_format;
864 struct mm_fp0_format mm_fp0_format;
865 struct fp1_format fp1_format;
866 struct mm_fp1_format mm_fp1_format;
867 struct mm_fp2_format mm_fp2_format;
868 struct mm_fp3_format mm_fp3_format;
869 struct mm_fp4_format mm_fp4_format;
870 struct mm_fp5_format mm_fp5_format;
871 struct fp6_format fp6_format;
872 struct mm_fp6_format mm_fp6_format;
873 struct mm_i_format mm_i_format;
874 struct mm_m_format mm_m_format;
875 struct mm_x_format mm_x_format;
876 struct mm_b0_format mm_b0_format;
877 struct mm_b1_format mm_b1_format;
878 struct mm16_m_format mm16_m_format ;
879 struct mm16_rb_format mm16_rb_format;
880 struct mm16_r3_format mm16_r3_format;
881 struct mm16_r5_format mm16_r5_format;
882};
883
884union mips16e_instruction {
885 unsigned int full : 16;
886 struct m16e_rr rr;
887 struct m16e_jal jal;
888 struct m16e_i64 i64;
889 struct m16e_ri64 ri64;
890 struct m16e_ri ri;
891 struct m16e_rri rri;
892 struct m16e_i8 i8;
329}; 893};
330 894
331#endif /* _UAPI_ASM_INST_H */ 895#endif /* _UAPI_ASM_INST_H */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 520a908d45d6..6ad9e04bdf62 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -5,7 +5,7 @@
5extra-y := head.o vmlinux.lds 5extra-y := head.o vmlinux.lds
6 6
7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ 7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
8 ptrace.o reset.o setup.o signal.o syscall.o \ 8 prom.o ptrace.o reset.o setup.o signal.o syscall.o \
9 time.o topology.o traps.o unaligned.o watch.o vdso.o 9 time.o topology.o traps.o unaligned.o watch.o vdso.o
10 10
11ifdef CONFIG_FUNCTION_TRACER 11ifdef CONFIG_FUNCTION_TRACER
@@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
19obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o 19obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
20obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o 20obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
21obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o 21obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
22obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
22obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o 23obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
23obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o 24obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
24obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o 25obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
25obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o 26obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
27obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
26obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o 28obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
27obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o 29obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
28obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o 30obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
29obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o 31obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
30obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
31obj-$(CONFIG_SYNC_R4K) += sync-r4k.o 32obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
32 33
33obj-$(CONFIG_STACKTRACE) += stacktrace.o 34obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
86obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 87obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
87obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o 88obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
88 89
89obj-$(CONFIG_OF) += prom.o
90
91CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 90CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
92 91
93obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o 92obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 50285b2c7ffe..0845091ba480 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -17,6 +17,8 @@
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19 19
20#include <linux/kvm_host.h>
21
20void output_ptreg_defines(void) 22void output_ptreg_defines(void)
21{ 23{
22 COMMENT("MIPS pt_regs offsets."); 24 COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
328 BLANK(); 330 BLANK();
329} 331}
330#endif 332#endif
333
334void output_kvm_defines(void)
335{
336 COMMENT(" KVM/MIPS Specfic offsets. ");
337 DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
338 OFFSET(VCPU_RUN, kvm_vcpu, run);
339 OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
340
341 OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
342 OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
343
344 OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
345 OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
346
347 OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
348 OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
349 OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
350 OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
351
352 OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
353
354 OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
355 OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
356 OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
357 OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
358 OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
359 OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
360 OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
361 OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
362 OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
363 OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
364 OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
365 OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
366 OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
367 OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
368 OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
369 OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
370 OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
371 OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
372 OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
373 OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
374 OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
375 OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
376 OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
377 OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
378 OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
379 OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
380 OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
381 OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
382 OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
383 OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
384 OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
385 OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
386 OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
387 OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
388 OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
389 OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
390 OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
391 OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
392
393 OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
394 OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
395 BLANK();
396}
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 556a4357d7fc..97c5a1668e53 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
48 __res; \ 48 __res; \
49}) 49})
50 50
51#ifdef CONFIG_KVM_GUEST
52#define TASK32_SIZE 0x3fff8000UL
53#else
51#define TASK32_SIZE 0x7fff8000UL 54#define TASK32_SIZE 0x7fff8000UL
55#endif
52#undef ELF_ET_DYN_BASE 56#undef ELF_ET_DYN_BASE
53#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) 57#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
54 58
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 83ffe950f710..46c2ad0703a0 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -14,10 +14,186 @@
14#include <asm/cpu.h> 14#include <asm/cpu.h>
15#include <asm/cpu-features.h> 15#include <asm/cpu-features.h>
16#include <asm/fpu.h> 16#include <asm/fpu.h>
17#include <asm/fpu_emulator.h>
17#include <asm/inst.h> 18#include <asm/inst.h>
18#include <asm/ptrace.h> 19#include <asm/ptrace.h>
19#include <asm/uaccess.h> 20#include <asm/uaccess.h>
20 21
22/*
23 * Calculate and return exception PC in case of branch delay slot
24 * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
25 */
26int __isa_exception_epc(struct pt_regs *regs)
27{
28 unsigned short inst;
29 long epc = regs->cp0_epc;
30
31 /* Calculate exception PC in branch delay slot. */
32 if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
33 /* This should never happen because delay slot was checked. */
34 force_sig(SIGSEGV, current);
35 return epc;
36 }
37 if (cpu_has_mips16) {
38 if (((union mips16e_instruction)inst).ri.opcode
39 == MIPS16e_jal_op)
40 epc += 4;
41 else
42 epc += 2;
43 } else if (mm_insn_16bit(inst))
44 epc += 2;
45 else
46 epc += 4;
47
48 return epc;
49}
50
51/*
52 * Compute return address and emulate branch in microMIPS mode after an
53 * exception only. It does not handle compact branches/jumps and cannot
54 * be used in interrupt context. (Compact branches/jumps do not cause
55 * exceptions.)
56 */
57int __microMIPS_compute_return_epc(struct pt_regs *regs)
58{
59 u16 __user *pc16;
60 u16 halfword;
61 unsigned int word;
62 unsigned long contpc;
63 struct mm_decoded_insn mminsn = { 0 };
64
65 mminsn.micro_mips_mode = 1;
66
67 /* This load never faults. */
68 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
69 __get_user(halfword, pc16);
70 pc16++;
71 contpc = regs->cp0_epc + 2;
72 word = ((unsigned int)halfword << 16);
73 mminsn.pc_inc = 2;
74
75 if (!mm_insn_16bit(halfword)) {
76 __get_user(halfword, pc16);
77 pc16++;
78 contpc = regs->cp0_epc + 4;
79 mminsn.pc_inc = 4;
80 word |= halfword;
81 }
82 mminsn.insn = word;
83
84 if (get_user(halfword, pc16))
85 goto sigsegv;
86 mminsn.next_pc_inc = 2;
87 word = ((unsigned int)halfword << 16);
88
89 if (!mm_insn_16bit(halfword)) {
90 pc16++;
91 if (get_user(halfword, pc16))
92 goto sigsegv;
93 mminsn.next_pc_inc = 4;
94 word |= halfword;
95 }
96 mminsn.next_insn = word;
97
98 mm_isBranchInstr(regs, mminsn, &contpc);
99
100 regs->cp0_epc = contpc;
101
102 return 0;
103
104sigsegv:
105 force_sig(SIGSEGV, current);
106 return -EFAULT;
107}
108
109/*
110 * Compute return address and emulate branch in MIPS16e mode after an
111 * exception only. It does not handle compact branches/jumps and cannot
112 * be used in interrupt context. (Compact branches/jumps do not cause
113 * exceptions.)
114 */
115int __MIPS16e_compute_return_epc(struct pt_regs *regs)
116{
117 u16 __user *addr;
118 union mips16e_instruction inst;
119 u16 inst2;
120 u32 fullinst;
121 long epc;
122
123 epc = regs->cp0_epc;
124
125 /* Read the instruction. */
126 addr = (u16 __user *)msk_isa16_mode(epc);
127 if (__get_user(inst.full, addr)) {
128 force_sig(SIGSEGV, current);
129 return -EFAULT;
130 }
131
132 switch (inst.ri.opcode) {
133 case MIPS16e_extend_op:
134 regs->cp0_epc += 4;
135 return 0;
136
137 /*
138 * JAL and JALX in MIPS16e mode
139 */
140 case MIPS16e_jal_op:
141 addr += 1;
142 if (__get_user(inst2, addr)) {
143 force_sig(SIGSEGV, current);
144 return -EFAULT;
145 }
146 fullinst = ((unsigned)inst.full << 16) | inst2;
147 regs->regs[31] = epc + 6;
148 epc += 4;
149 epc >>= 28;
150 epc <<= 28;
151 /*
152 * JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16
153 *
154 * ......TARGET[15:0].................TARGET[20:16]...........
155 * ......TARGET[25:21]
156 */
157 epc |=
158 ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
159 ((fullinst & 0x1f0000) << 7);
160 if (!inst.jal.x)
161 set_isa16_mode(epc); /* Set ISA mode bit. */
162 regs->cp0_epc = epc;
163 return 0;
164
165 /*
166 * J(AL)R(C)
167 */
168 case MIPS16e_rr_op:
169 if (inst.rr.func == MIPS16e_jr_func) {
170
171 if (inst.rr.ra)
172 regs->cp0_epc = regs->regs[31];
173 else
174 regs->cp0_epc =
175 regs->regs[reg16to32[inst.rr.rx]];
176
177 if (inst.rr.l) {
178 if (inst.rr.nd)
179 regs->regs[31] = epc + 2;
180 else
181 regs->regs[31] = epc + 4;
182 }
183 return 0;
184 }
185 break;
186 }
187
188 /*
189 * All other cases have no branch delay slot and are 16-bits.
190 * Branches do not cause an exception.
191 */
192 regs->cp0_epc += 2;
193
194 return 0;
195}
196
21/** 197/**
22 * __compute_return_epc_for_insn - Computes the return address and do emulate 198 * __compute_return_epc_for_insn - Computes the return address and do emulate
23 * branch simulation, if required. 199 * branch simulation, if required.
@@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
129 epc <<= 28; 305 epc <<= 28;
130 epc |= (insn.j_format.target << 2); 306 epc |= (insn.j_format.target << 2);
131 regs->cp0_epc = epc; 307 regs->cp0_epc = epc;
308 if (insn.i_format.opcode == jalx_op)
309 set_isa16_mode(regs->cp0_epc);
132 break; 310 break;
133 311
134 /* 312 /*
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644
index 000000000000..730eaf92c018
--- /dev/null
+++ b/arch/mips/kernel/cevt-gic.c
@@ -0,0 +1,104 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Imagination Technologies Ltd.
7 */
8#include <linux/clockchips.h>
9#include <linux/interrupt.h>
10#include <linux/percpu.h>
11#include <linux/smp.h>
12#include <linux/irq.h>
13
14#include <asm/time.h>
15#include <asm/gic.h>
16#include <asm/mips-boards/maltaint.h>
17
18DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
19int gic_timer_irq_installed;
20
21
22static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
23{
24 u64 cnt;
25 int res;
26
27 cnt = gic_read_count();
28 cnt += (u64)delta;
29 gic_write_compare(cnt);
30 res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
31 return res;
32}
33
34void gic_set_clock_mode(enum clock_event_mode mode,
35 struct clock_event_device *evt)
36{
37 /* Nothing to do ... */
38}
39
40irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
41{
42 struct clock_event_device *cd;
43 int cpu = smp_processor_id();
44
45 gic_write_compare(gic_read_compare());
46 cd = &per_cpu(gic_clockevent_device, cpu);
47 cd->event_handler(cd);
48 return IRQ_HANDLED;
49}
50
51struct irqaction gic_compare_irqaction = {
52 .handler = gic_compare_interrupt,
53 .flags = IRQF_PERCPU | IRQF_TIMER,
54 .name = "timer",
55};
56
57
58void gic_event_handler(struct clock_event_device *dev)
59{
60}
61
62int __cpuinit gic_clockevent_init(void)
63{
64 unsigned int cpu = smp_processor_id();
65 struct clock_event_device *cd;
66 unsigned int irq;
67
68 if (!cpu_has_counter || !gic_frequency)
69 return -ENXIO;
70
71 irq = MIPS_GIC_IRQ_BASE;
72
73 cd = &per_cpu(gic_clockevent_device, cpu);
74
75 cd->name = "MIPS GIC";
76 cd->features = CLOCK_EVT_FEAT_ONESHOT;
77
78 clockevent_set_clock(cd, gic_frequency);
79
80 /* Calculate the min / max delta */
81 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
82 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
83
84 cd->rating = 300;
85 cd->irq = irq;
86 cd->cpumask = cpumask_of(cpu);
87 cd->set_next_event = gic_next_event;
88 cd->set_mode = gic_set_clock_mode;
89 cd->event_handler = gic_event_handler;
90
91 clockevents_register_device(cd);
92
93 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
94 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
95
96 if (gic_timer_irq_installed)
97 return 0;
98
99 gic_timer_irq_installed = 1;
100
101 setup_irq(irq, &gic_compare_irqaction);
102 irq_set_handler(irq, handle_percpu_irq);
103 return 0;
104}
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 07b847d77f5d..02033eaf8825 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#ifndef CONFIG_MIPS_MT_SMTC 25#ifndef CONFIG_MIPS_MT_SMTC
26
27static int mips_next_event(unsigned long delta, 26static int mips_next_event(unsigned long delta,
28 struct clock_event_device *evt) 27 struct clock_event_device *evt)
29{ 28{
@@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
49int cp0_timer_irq_installed; 48int cp0_timer_irq_installed;
50 49
51#ifndef CONFIG_MIPS_MT_SMTC 50#ifndef CONFIG_MIPS_MT_SMTC
52
53irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 51irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
54{ 52{
55 const int r2 = cpu_has_mips_r2; 53 const int r2 = cpu_has_mips_r2;
@@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
74 /* Clear Count/Compare Interrupt */ 72 /* Clear Count/Compare Interrupt */
75 write_c0_compare(read_c0_compare()); 73 write_c0_compare(read_c0_compare());
76 cd = &per_cpu(mips_clockevent_device, cpu); 74 cd = &per_cpu(mips_clockevent_device, cpu);
75#ifdef CONFIG_CEVT_GIC
76 if (!gic_present)
77#endif
77 cd->event_handler(cd); 78 cd->event_handler(cd);
78 } 79 }
79 80
@@ -118,6 +119,10 @@ int c0_compare_int_usable(void)
118 unsigned int delta; 119 unsigned int delta;
119 unsigned int cnt; 120 unsigned int cnt;
120 121
122#ifdef CONFIG_KVM_GUEST
123 return 1;
124#endif
125
121 /* 126 /*
122 * IP7 already pending? Try to clear it by acking the timer. 127 * IP7 already pending? Try to clear it by acking the timer.
123 */ 128 */
@@ -166,7 +171,6 @@ int c0_compare_int_usable(void)
166} 171}
167 172
168#ifndef CONFIG_MIPS_MT_SMTC 173#ifndef CONFIG_MIPS_MT_SMTC
169
170int __cpuinit r4k_clockevent_init(void) 174int __cpuinit r4k_clockevent_init(void)
171{ 175{
172 unsigned int cpu = smp_processor_id(); 176 unsigned int cpu = smp_processor_id();
@@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void)
206 cd->set_mode = mips_set_clock_mode; 210 cd->set_mode = mips_set_clock_mode;
207 cd->event_handler = mips_event_handler; 211 cd->event_handler = mips_event_handler;
208 212
213#ifdef CONFIG_CEVT_GIC
214 if (!gic_present)
215#endif
209 clockevents_register_device(cd); 216 clockevents_register_device(cd);
210 217
211 if (cp0_timer_irq_installed) 218 if (cp0_timer_irq_installed)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5fe66a0c3224..4bbffdb9024f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
470 c->options |= MIPS_CPU_ULRI; 470 c->options |= MIPS_CPU_ULRI;
471 if (config3 & MIPS_CONF3_ISA) 471 if (config3 & MIPS_CONF3_ISA)
472 c->options |= MIPS_CPU_MICROMIPS; 472 c->options |= MIPS_CPU_MICROMIPS;
473#ifdef CONFIG_CPU_MICROMIPS
474 write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
475#endif
473 if (config3 & MIPS_CONF3_VZ) 476 if (config3 & MIPS_CONF3_VZ)
474 c->ases |= MIPS_ASE_VZ; 477 c->ases |= MIPS_ASE_VZ;
475 478
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
index 5dca24bce51b..e02620901117 100644
--- a/arch/mips/kernel/csrc-gic.c
+++ b/arch/mips/kernel/csrc-gic.c
@@ -5,23 +5,14 @@
5 * 5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */ 7 */
8#include <linux/clocksource.h>
9#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/time.h>
10 10
11#include <asm/time.h>
12#include <asm/gic.h> 11#include <asm/gic.h>
13 12
14static cycle_t gic_hpt_read(struct clocksource *cs) 13static cycle_t gic_hpt_read(struct clocksource *cs)
15{ 14{
16 unsigned int hi, hi2, lo; 15 return gic_read_count();
17
18 do {
19 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
20 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
21 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
22 } while (hi2 != hi);
23
24 return (((cycle_t) hi) << 32) + lo;
25} 16}
26 17
27static struct clocksource gic_clocksource = { 18static struct clocksource gic_clocksource = {
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ecb347ce1b3d..5c2ba9f08a80 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@
5 * 5 *
6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle 6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 * Copyright (C) 2002, 2007 Maciej W. Rozycki 8 * Copyright (C) 2002, 2007 Maciej W. Rozycki
9 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12 12
@@ -21,8 +21,10 @@
21#include <asm/war.h> 21#include <asm/war.h>
22#include <asm/thread_info.h> 22#include <asm/thread_info.h>
23 23
24#ifdef CONFIG_MIPS_MT_SMTC
24#define PANIC_PIC(msg) \ 25#define PANIC_PIC(msg) \
25 .set push; \ 26 .set push; \
27 .set nomicromips; \
26 .set reorder; \ 28 .set reorder; \
27 PTR_LA a0,8f; \ 29 PTR_LA a0,8f; \
28 .set noat; \ 30 .set noat; \
@@ -31,17 +33,10 @@
319: b 9b; \ 339: b 9b; \
32 .set pop; \ 34 .set pop; \
33 TEXT(msg) 35 TEXT(msg)
36#endif
34 37
35 __INIT 38 __INIT
36 39
37NESTED(except_vec0_generic, 0, sp)
38 PANIC_PIC("Exception vector 0 called")
39 END(except_vec0_generic)
40
41NESTED(except_vec1_generic, 0, sp)
42 PANIC_PIC("Exception vector 1 called")
43 END(except_vec1_generic)
44
45/* 40/*
46 * General exception vector for all other CPUs. 41 * General exception vector for all other CPUs.
47 * 42 *
@@ -138,12 +133,19 @@ LEAF(r4k_wait)
138 nop 133 nop
139 nop 134 nop
140 nop 135 nop
136#ifdef CONFIG_CPU_MICROMIPS
137 nop
138 nop
139 nop
140 nop
141#endif
141 .set mips3 142 .set mips3
142 wait 143 wait
143 /* end of rollback region (the region size must be power of two) */ 144 /* end of rollback region (the region size must be power of two) */
144 .set pop
1451: 1451:
146 jr ra 146 jr ra
147 nop
148 .set pop
147 END(r4k_wait) 149 END(r4k_wait)
148 150
149 .macro BUILD_ROLLBACK_PROLOGUE handler 151 .macro BUILD_ROLLBACK_PROLOGUE handler
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
201 LONG_L s0, TI_REGS($28) 203 LONG_L s0, TI_REGS($28)
202 LONG_S sp, TI_REGS($28) 204 LONG_S sp, TI_REGS($28)
203 PTR_LA ra, ret_from_irq 205 PTR_LA ra, ret_from_irq
204 j plat_irq_dispatch 206 PTR_LA v0, plat_irq_dispatch
207 jr v0
208#ifdef CONFIG_CPU_MICROMIPS
209 nop
210#endif
205 END(handle_int) 211 END(handle_int)
206 212
207 __INIT 213 __INIT
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
222/* 228/*
223 * EJTAG debug exception handler. 229 * EJTAG debug exception handler.
224 * The EJTAG debug exception entry point is 0xbfc00480, which 230 * The EJTAG debug exception entry point is 0xbfc00480, which
225 * normally is in the boot PROM, so the boot PROM must do a 231 * normally is in the boot PROM, so the boot PROM must do an
226 * unconditional jump to this vector. 232 * unconditional jump to this vector.
227 */ 233 */
228NESTED(except_vec_ejtag_debug, 0, sp) 234NESTED(except_vec_ejtag_debug, 0, sp)
229 j ejtag_debug_handler 235 j ejtag_debug_handler
236#ifdef CONFIG_CPU_MICROMIPS
237 nop
238#endif
230 END(except_vec_ejtag_debug) 239 END(except_vec_ejtag_debug)
231 240
232 __FINIT 241 __FINIT
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
251FEXPORT(except_vec_vi_mori) 260FEXPORT(except_vec_vi_mori)
252 ori a0, $0, 0 261 ori a0, $0, 0
253#endif /* CONFIG_MIPS_MT_SMTC */ 262#endif /* CONFIG_MIPS_MT_SMTC */
263 PTR_LA v1, except_vec_vi_handler
254FEXPORT(except_vec_vi_lui) 264FEXPORT(except_vec_vi_lui)
255 lui v0, 0 /* Patched */ 265 lui v0, 0 /* Patched */
256 j except_vec_vi_handler 266 jr v1
257FEXPORT(except_vec_vi_ori) 267FEXPORT(except_vec_vi_ori)
258 ori v0, 0 /* Patched */ 268 ori v0, 0 /* Patched */
259 .set pop 269 .set pop
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
354 */ 364 */
355NESTED(except_vec_nmi, 0, sp) 365NESTED(except_vec_nmi, 0, sp)
356 j nmi_handler 366 j nmi_handler
367#ifdef CONFIG_CPU_MICROMIPS
368 nop
369#endif
357 END(except_vec_nmi) 370 END(except_vec_nmi)
358 371
359 __FINIT 372 __FINIT
@@ -480,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
480 .set noreorder 493 .set noreorder
481 /* check if TLB contains a entry for EPC */ 494 /* check if TLB contains a entry for EPC */
482 MFC0 k1, CP0_ENTRYHI 495 MFC0 k1, CP0_ENTRYHI
483 andi k1, 0xff /* ASID_MASK */ 496 andi k1, 0xff /* ASID_MASK patched at run-time!! */
484 MFC0 k0, CP0_EPC 497 MFC0 k0, CP0_EPC
485 PTR_SRL k0, _PAGE_SHIFT + 1 498 PTR_SRL k0, _PAGE_SHIFT + 1
486 PTR_SLL k0, _PAGE_SHIFT + 1 499 PTR_SLL k0, _PAGE_SHIFT + 1
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
500 .set push 513 .set push
501 .set noat 514 .set noat
502 .set noreorder 515 .set noreorder
503 /* 0x7c03e83b: rdhwr v1,$29 */ 516 /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
517 /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
504 MFC0 k1, CP0_EPC 518 MFC0 k1, CP0_EPC
505 lui k0, 0x7c03 519#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
506 lw k1, (k1) 520 and k0, k1, 1
507 ori k0, 0xe83b 521 beqz k0, 1f
508 .set reorder 522 xor k1, k0
523 lhu k0, (k1)
524 lhu k1, 2(k1)
525 ins k1, k0, 16, 16
526 lui k0, 0x007d
527 b docheck
528 ori k0, 0x6b3c
5291:
530 lui k0, 0x7c03
531 lw k1, (k1)
532 ori k0, 0xe83b
533#else
534 andi k0, k1, 1
535 bnez k0, handle_ri
536 lui k0, 0x7c03
537 lw k1, (k1)
538 ori k0, 0xe83b
539#endif
540 .set reorder
541docheck:
509 bne k0, k1, handle_ri /* if not ours */ 542 bne k0, k1, handle_ri /* if not ours */
543
544isrdhwr:
510 /* The insn is rdhwr. No need to check CAUSE.BD here. */ 545 /* The insn is rdhwr. No need to check CAUSE.BD here. */
511 get_saved_sp /* k1 := current_thread_info */ 546 get_saved_sp /* k1 := current_thread_info */
512 .set noreorder 547 .set noreorder
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 485e6a961b31..c01b307317a9 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -10,6 +10,7 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/clocksource.h>
13 14
14#include <asm/io.h> 15#include <asm/io.h>
15#include <asm/gic.h> 16#include <asm/gic.h>
@@ -19,6 +20,8 @@
19#include <linux/hardirq.h> 20#include <linux/hardirq.h>
20#include <asm-generic/bitops/find.h> 21#include <asm-generic/bitops/find.h>
21 22
23unsigned int gic_frequency;
24unsigned int gic_present;
22unsigned long _gic_base; 25unsigned long _gic_base;
23unsigned int gic_irq_base; 26unsigned int gic_irq_base;
24unsigned int gic_irq_flags[GIC_NUM_INTRS]; 27unsigned int gic_irq_flags[GIC_NUM_INTRS];
@@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
30static struct gic_pending_regs pending_regs[NR_CPUS]; 33static struct gic_pending_regs pending_regs[NR_CPUS];
31static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; 34static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
32 35
36#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
37cycle_t gic_read_count(void)
38{
39 unsigned int hi, hi2, lo;
40
41 do {
42 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
43 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
44 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
45 } while (hi2 != hi);
46
47 return (((cycle_t) hi) << 32) + lo;
48}
49
50void gic_write_compare(cycle_t cnt)
51{
52 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
53 (int)(cnt >> 32));
54 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
55 (int)(cnt & 0xffffffff));
56}
57
58cycle_t gic_read_compare(void)
59{
60 unsigned int hi, lo;
61
62 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
63 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
64
65 return (((cycle_t) hi) << 32) + lo;
66}
67#endif
68
33unsigned int gic_get_timer_pending(void) 69unsigned int gic_get_timer_pending(void)
34{ 70{
35 unsigned int vpe_pending; 71 unsigned int vpe_pending;
@@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes)
116 } 152 }
117} 153}
118 154
155unsigned int gic_compare_int(void)
156{
157 unsigned int pending;
158
159 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
160 if (pending & GIC_VPE_PEND_CMP_MSK)
161 return 1;
162 else
163 return 0;
164}
165
119unsigned int gic_get_int(void) 166unsigned int gic_get_int(void)
120{ 167{
121 unsigned int i; 168 unsigned int i;
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index d1d576b765f5..0b29646bcee7 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -165,10 +165,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
165 return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3), 165 return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
166 merge_64(len_a4, len_a5)); 166 merge_64(len_a4, len_a5));
167} 167}
168
169SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
170 u64, a3, u64, a4, int, dfd, const char __user *, pathname)
171{
172 return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
173 dfd, pathname);
174}
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
index 411a058d2c53..876097529697 100644
--- a/arch/mips/kernel/mips_machine.c
+++ b/arch/mips/kernel/mips_machine.c
@@ -11,9 +11,9 @@
11#include <linux/slab.h> 11#include <linux/slab.h>
12 12
13#include <asm/mips_machine.h> 13#include <asm/mips_machine.h>
14#include <asm/prom.h>
14 15
15static struct mips_machine *mips_machine __initdata; 16static struct mips_machine *mips_machine __initdata;
16static char *mips_machine_name = "Unknown";
17 17
18#define for_each_machine(mach) \ 18#define for_each_machine(mach) \
19 for ((mach) = (struct mips_machine *)&__mips_machines_start; \ 19 for ((mach) = (struct mips_machine *)&__mips_machines_start; \
@@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown";
21 (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \ 21 (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
22 (mach)++) 22 (mach)++)
23 23
24__init void mips_set_machine_name(const char *name)
25{
26 char *p;
27
28 if (name == NULL)
29 return;
30
31 p = kstrdup(name, GFP_KERNEL);
32 if (!p)
33 pr_err("MIPS: no memory for machine_name\n");
34
35 mips_machine_name = p;
36}
37
38char *mips_get_machine_name(void)
39{
40 return mips_machine_name;
41}
42
43__init int mips_machtype_setup(char *id) 24__init int mips_machtype_setup(char *id)
44{ 25{
45 struct mips_machine *mach; 26 struct mips_machine *mach;
@@ -79,7 +60,6 @@ __init void mips_machine_setup(void)
79 return; 60 return;
80 61
81 mips_set_machine_name(mips_machine->mach_name); 62 mips_set_machine_name(mips_machine->mach_name);
82 pr_info("MIPS: machine is %s\n", mips_machine_name);
83 63
84 if (mips_machine->mach_setup) 64 if (mips_machine->mach_setup)
85 mips_machine->mach_setup(); 65 mips_machine->mach_setup();
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 7a54f74b7818..a3e461408b7e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -12,7 +12,7 @@
12#include <asm/cpu-features.h> 12#include <asm/cpu-features.h>
13#include <asm/mipsregs.h> 13#include <asm/mipsregs.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/mips_machine.h> 15#include <asm/prom.h>
16 16
17unsigned int vced_count, vcei_count; 17unsigned int vced_count, vcei_count;
18 18
@@ -99,6 +99,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
99 if (cpu_has_vz) seq_printf(m, "%s", " vz"); 99 if (cpu_has_vz) seq_printf(m, "%s", " vz");
100 seq_printf(m, "\n"); 100 seq_printf(m, "\n");
101 101
102 if (cpu_has_mmips) {
103 seq_printf(m, "micromips kernel\t: %s\n",
104 (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
105 }
102 seq_printf(m, "shadow register sets\t: %d\n", 106 seq_printf(m, "shadow register sets\t: %d\n",
103 cpu_data[n].srsets); 107 cpu_data[n].srsets);
104 seq_printf(m, "kscratch registers\t: %d\n", 108 seq_printf(m, "kscratch registers\t: %d\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index cfc742d75b7f..eb902c1f0cad 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) 7 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2004 Thiemo Seufer 9 * Copyright (C) 2004 Thiemo Seufer
10 * Copyright (C) 2013 Imagination Technologies Ltd.
10 */ 11 */
11#include <linux/errno.h> 12#include <linux/errno.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
@@ -225,34 +226,115 @@ struct mips_frame_info {
225 226
226static inline int is_ra_save_ins(union mips_instruction *ip) 227static inline int is_ra_save_ins(union mips_instruction *ip)
227{ 228{
229#ifdef CONFIG_CPU_MICROMIPS
230 union mips_instruction mmi;
231
232 /*
233 * swsp ra,offset
234 * swm16 reglist,offset(sp)
235 * swm32 reglist,offset(sp)
236 * sw32 ra,offset(sp)
237 * jradiussp - NOT SUPPORTED
238 *
239 * microMIPS is way more fun...
240 */
241 if (mm_insn_16bit(ip->halfword[0])) {
242 mmi.word = (ip->halfword[0] << 16);
243 return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
244 mmi.mm16_r5_format.rt == 31) ||
245 (mmi.mm16_m_format.opcode == mm_pool16c_op &&
246 mmi.mm16_m_format.func == mm_swm16_op));
247 }
248 else {
249 mmi.halfword[0] = ip->halfword[1];
250 mmi.halfword[1] = ip->halfword[0];
251 return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
252 mmi.mm_m_format.rd > 9 &&
253 mmi.mm_m_format.base == 29 &&
254 mmi.mm_m_format.func == mm_swm32_func) ||
255 (mmi.i_format.opcode == mm_sw32_op &&
256 mmi.i_format.rs == 29 &&
257 mmi.i_format.rt == 31));
258 }
259#else
228 /* sw / sd $ra, offset($sp) */ 260 /* sw / sd $ra, offset($sp) */
229 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && 261 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
230 ip->i_format.rs == 29 && 262 ip->i_format.rs == 29 &&
231 ip->i_format.rt == 31; 263 ip->i_format.rt == 31;
264#endif
232} 265}
233 266
234static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) 267static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
235{ 268{
269#ifdef CONFIG_CPU_MICROMIPS
270 /*
271 * jr16,jrc,jalr16,jalr16
272 * jal
273 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
274 * jraddiusp - NOT SUPPORTED
275 *
276 * microMIPS is kind of more fun...
277 */
278 union mips_instruction mmi;
279
280 mmi.word = (ip->halfword[0] << 16);
281
282 if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
283 (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
284 ip->j_format.opcode == mm_jal32_op)
285 return 1;
286 if (ip->r_format.opcode != mm_pool32a_op ||
287 ip->r_format.func != mm_pool32axf_op)
288 return 0;
289 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
290#else
236 if (ip->j_format.opcode == jal_op) 291 if (ip->j_format.opcode == jal_op)
237 return 1; 292 return 1;
238 if (ip->r_format.opcode != spec_op) 293 if (ip->r_format.opcode != spec_op)
239 return 0; 294 return 0;
240 return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; 295 return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
296#endif
241} 297}
242 298
243static inline int is_sp_move_ins(union mips_instruction *ip) 299static inline int is_sp_move_ins(union mips_instruction *ip)
244{ 300{
301#ifdef CONFIG_CPU_MICROMIPS
302 /*
303 * addiusp -imm
304 * addius5 sp,-imm
305 * addiu32 sp,sp,-imm
306 * jradiussp - NOT SUPPORTED
307 *
308 * microMIPS is not more fun...
309 */
310 if (mm_insn_16bit(ip->halfword[0])) {
311 union mips_instruction mmi;
312
313 mmi.word = (ip->halfword[0] << 16);
314 return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
315 mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
316 (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
317 mmi.mm16_r5_format.rt == 29));
318 }
319 return (ip->mm_i_format.opcode == mm_addiu32_op &&
320 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
321#else
245 /* addiu/daddiu sp,sp,-imm */ 322 /* addiu/daddiu sp,sp,-imm */
246 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) 323 if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
247 return 0; 324 return 0;
248 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) 325 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
249 return 1; 326 return 1;
327#endif
250 return 0; 328 return 0;
251} 329}
252 330
253static int get_frame_info(struct mips_frame_info *info) 331static int get_frame_info(struct mips_frame_info *info)
254{ 332{
333#ifdef CONFIG_CPU_MICROMIPS
334 union mips_instruction *ip = (void *) (((char *) info->func) - 1);
335#else
255 union mips_instruction *ip = info->func; 336 union mips_instruction *ip = info->func;
337#endif
256 unsigned max_insns = info->func_size / sizeof(union mips_instruction); 338 unsigned max_insns = info->func_size / sizeof(union mips_instruction);
257 unsigned i; 339 unsigned i;
258 340
@@ -272,7 +354,26 @@ static int get_frame_info(struct mips_frame_info *info)
272 break; 354 break;
273 if (!info->frame_size) { 355 if (!info->frame_size) {
274 if (is_sp_move_ins(ip)) 356 if (is_sp_move_ins(ip))
357 {
358#ifdef CONFIG_CPU_MICROMIPS
359 if (mm_insn_16bit(ip->halfword[0]))
360 {
361 unsigned short tmp;
362
363 if (ip->halfword[0] & mm_addiusp_func)
364 {
365 tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
366 info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
367 } else {
368 tmp = (ip->halfword[0] >> 1);
369 info->frame_size = -(signed short)(tmp & 0xf);
370 }
371 ip = (void *) &ip->halfword[1];
372 ip--;
373 } else
374#endif
275 info->frame_size = - ip->i_format.simmediate; 375 info->frame_size = - ip->i_format.simmediate;
376 }
276 continue; 377 continue;
277 } 378 }
278 if (info->pc_offset == -1 && is_ra_save_ins(ip)) { 379 if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 028f6f837ef9..5712bb532245 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -23,6 +23,23 @@
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/prom.h> 24#include <asm/prom.h>
25 25
26static char mips_machine_name[64] = "Unknown";
27
28__init void mips_set_machine_name(const char *name)
29{
30 if (name == NULL)
31 return;
32
33 strncpy(mips_machine_name, name, sizeof(mips_machine_name));
34 pr_info("MIPS: machine is %s\n", mips_get_machine_name());
35}
36
37char *mips_get_machine_name(void)
38{
39 return mips_machine_name;
40}
41
42#ifdef CONFIG_OF
26int __init early_init_dt_scan_memory_arch(unsigned long node, 43int __init early_init_dt_scan_memory_arch(unsigned long node,
27 const char *uname, int depth, 44 const char *uname, int depth,
28 void *data) 45 void *data)
@@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
50} 67}
51#endif 68#endif
52 69
70int __init early_init_dt_scan_model(unsigned long node, const char *uname,
71 int depth, void *data)
72{
73 if (!depth) {
74 char *model = of_get_flat_dt_prop(node, "model", NULL);
75
76 if (model)
77 mips_set_machine_name(model);
78 }
79 return 0;
80}
81
53void __init early_init_devtree(void *params) 82void __init early_init_devtree(void *params)
54{ 83{
55 /* Setup flat device-tree pointer */ 84 /* Setup flat device-tree pointer */
@@ -65,6 +94,9 @@ void __init early_init_devtree(void *params)
65 /* Scan memory nodes */ 94 /* Scan memory nodes */
66 of_scan_flat_dt(early_init_dt_scan_root, NULL); 95 of_scan_flat_dt(early_init_dt_scan_root, NULL);
67 of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); 96 of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
97
98 /* try to load the mips machine name */
99 of_scan_flat_dt(early_init_dt_scan_model, NULL);
68} 100}
69 101
70void __init __dt_setup_arch(struct boot_param_header *bph) 102void __init __dt_setup_arch(struct boot_param_header *bph)
@@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph)
79 111
80 early_init_devtree(initial_boot_params); 112 early_init_devtree(initial_boot_params);
81} 113}
114#endif
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9ea29649fc28..9b36424b03c5 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -138,9 +138,18 @@ stackargs:
1385: jr t1 1385: jr t1
139 sw t5, 16(sp) # argument #5 to ksp 139 sw t5, 16(sp) # argument #5 to ksp
140 140
141#ifdef CONFIG_CPU_MICROMIPS
141 sw t8, 28(sp) # argument #8 to ksp 142 sw t8, 28(sp) # argument #8 to ksp
143 nop
142 sw t7, 24(sp) # argument #7 to ksp 144 sw t7, 24(sp) # argument #7 to ksp
145 nop
143 sw t6, 20(sp) # argument #6 to ksp 146 sw t6, 20(sp) # argument #6 to ksp
147 nop
148#else
149 sw t8, 28(sp) # argument #8 to ksp
150 sw t7, 24(sp) # argument #7 to ksp
151 sw t6, 20(sp) # argument #6 to ksp
152#endif
1446: j stack_done # go back 1536: j stack_done # go back
145 nop 154 nop
146 .set pop 155 .set pop
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 103bfe570fe8..74f485d3c0ef 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -529,7 +529,7 @@ sys_call_table:
529 PTR sys_accept4 529 PTR sys_accept4
530 PTR compat_sys_recvmmsg /* 4335 */ 530 PTR compat_sys_recvmmsg /* 4335 */
531 PTR sys_fanotify_init 531 PTR sys_fanotify_init
532 PTR sys_32_fanotify_mark 532 PTR compat_sys_fanotify_mark
533 PTR sys_prlimit64 533 PTR sys_prlimit64
534 PTR sys_name_to_handle_at 534 PTR sys_name_to_handle_at
535 PTR compat_sys_open_by_handle_at /* 4340 */ 535 PTR compat_sys_open_by_handle_at /* 4340 */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4c774d5d5087..c7f90519e58c 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -23,6 +23,7 @@
23#include <linux/pfn.h> 23#include <linux/pfn.h>
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/kexec.h> 25#include <linux/kexec.h>
26#include <linux/sizes.h>
26 27
27#include <asm/addrspace.h> 28#include <asm/addrspace.h>
28#include <asm/bootinfo.h> 29#include <asm/bootinfo.h>
@@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base);
77static struct resource code_resource = { .name = "Kernel code", }; 78static struct resource code_resource = { .name = "Kernel code", };
78static struct resource data_resource = { .name = "Kernel data", }; 79static struct resource data_resource = { .name = "Kernel data", };
79 80
81static void *detect_magic __initdata = detect_memory_region;
82
80void __init add_memory_region(phys_t start, phys_t size, long type) 83void __init add_memory_region(phys_t start, phys_t size, long type)
81{ 84{
82 int x = boot_mem_map.nr_map; 85 int x = boot_mem_map.nr_map;
@@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
122 boot_mem_map.nr_map++; 125 boot_mem_map.nr_map++;
123} 126}
124 127
128void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
129{
130 void *dm = &detect_magic;
131 phys_t size;
132
133 for (size = sz_min; size < sz_max; size <<= 1) {
134 if (!memcmp(dm, dm + size, sizeof(detect_magic)))
135 break;
136 }
137
138 pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
139 ((unsigned long long) size) / SZ_1M,
140 (unsigned long long) start,
141 ((unsigned long long) sz_min) / SZ_1M,
142 ((unsigned long long) sz_max) / SZ_1M);
143
144 add_memory_region(start, size, BOOT_MEM_RAM);
145}
146
125static void __init print_memory_map(void) 147static void __init print_memory_map(void)
126{ 148{
127 int i; 149 int i;
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b5e88fd83277..fd3ef2c2afbc 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -35,6 +35,7 @@
35#include <asm/war.h> 35#include <asm/war.h>
36#include <asm/vdso.h> 36#include <asm/vdso.h>
37#include <asm/dsp.h> 37#include <asm/dsp.h>
38#include <asm/inst.h>
38 39
39#include "signal-common.h" 40#include "signal-common.h"
40 41
@@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
480 sigset_t *oldset = sigmask_to_save(); 481 sigset_t *oldset = sigmask_to_save();
481 int ret; 482 int ret;
482 struct mips_abi *abi = current->thread.abi; 483 struct mips_abi *abi = current->thread.abi;
484#ifdef CONFIG_CPU_MICROMIPS
485 void *vdso;
486 unsigned int tmp = (unsigned int)current->mm->context.vdso;
487
488 set_isa16_mode(tmp);
489 vdso = (void *)tmp;
490#else
483 void *vdso = current->mm->context.vdso; 491 void *vdso = current->mm->context.vdso;
492#endif
484 493
485 if (regs->regs[0]) { 494 if (regs->regs[0]) {
486 switch(regs->regs[2]) { 495 switch(regs->regs[2]) {
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index bfede063d96a..3e5164c11cac 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -34,6 +34,7 @@
34#include <asm/mipsregs.h> 34#include <asm/mipsregs.h>
35#include <asm/mipsmtregs.h> 35#include <asm/mipsmtregs.h>
36#include <asm/mips_mt.h> 36#include <asm/mips_mt.h>
37#include <asm/gic.h>
37 38
38static void __init smvp_copy_vpe_config(void) 39static void __init smvp_copy_vpe_config(void)
39{ 40{
@@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
151static void __cpuinit vsmp_init_secondary(void) 152static void __cpuinit vsmp_init_secondary(void)
152{ 153{
153#ifdef CONFIG_IRQ_GIC 154#ifdef CONFIG_IRQ_GIC
154 extern int gic_present;
155
156 /* This is Malta specific: IPI,performance and timer interrupts */ 155 /* This is Malta specific: IPI,performance and timer interrupts */
157 if (gic_present) 156 if (gic_present)
158 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 157 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index aee04af213c5..c17619fe18e3 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu)
83} 83}
84 84
85struct plat_smp_ops *mp_ops; 85struct plat_smp_ops *mp_ops;
86EXPORT_SYMBOL(mp_ops);
86 87
87__cpuinit void register_smp_ops(struct plat_smp_ops *ops) 88__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
88{ 89{
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 76016ac0a9c8..2866863a39df 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
49 .text 49 .text
50 .align 5 50 .align 5
51FEXPORT(__smtc_ipi_vector) 51FEXPORT(__smtc_ipi_vector)
52#ifdef CONFIG_CPU_MICROMIPS
53 nop
54#endif
52 .set noat 55 .set noat
53 /* Disable thread scheduling to make Status update atomic */ 56 /* Disable thread scheduling to make Status update atomic */
54 DMT 27 # dmt k1 57 DMT 27 # dmt k1
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 7186222dc5bb..31d22f3121c9 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@ static int vpe0limit;
111static int ipibuffers; 111static int ipibuffers;
112static int nostlb; 112static int nostlb;
113static int asidmask; 113static int asidmask;
114unsigned long smtc_asid_mask = 0xff; 114unsigned int smtc_asid_mask = 0xff;
115 115
116static int __init vpe0tcs(char *str) 116static int __init vpe0tcs(char *str)
117{ 117{
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1395 asid = asid_cache(cpu); 1395 asid = asid_cache(cpu);
1396 1396
1397 do { 1397 do {
1398 if (!((asid += ASID_INC) & ASID_MASK) ) { 1398 if (!ASID_MASK(ASID_INC(asid))) {
1399 if (cpu_has_vtag_icache) 1399 if (cpu_has_vtag_icache)
1400 flush_icache_all(); 1400 flush_icache_all();
1401 /* Traverse all online CPUs (hack requires contiguous range) */ 1401 /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1414 mips_ihb(); 1414 mips_ihb();
1415 } 1415 }
1416 tcstat = read_tc_c0_tcstatus(); 1416 tcstat = read_tc_c0_tcstatus();
1417 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); 1417 smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
1418 if (!prevhalt) 1418 if (!prevhalt)
1419 write_tc_c0_tchalt(0); 1419 write_tc_c0_tchalt(0);
1420 } 1420 }
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1423 asid = ASID_FIRST_VERSION; 1423 asid = ASID_FIRST_VERSION;
1424 local_flush_tlb_all(); /* start new asid cycle */ 1424 local_flush_tlb_all(); /* start new asid cycle */
1425 } 1425 }
1426 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); 1426 } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
1427 1427
1428 /* 1428 /*
1429 * SMTC shares the TLB within VPEs and possibly across all VPEs. 1429 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1461 tlb_read(); 1461 tlb_read();
1462 ehb(); 1462 ehb();
1463 ehi = read_c0_entryhi(); 1463 ehi = read_c0_entryhi();
1464 if ((ehi & ASID_MASK) == asid) { 1464 if (ASID_MASK(ehi) == asid) {
1465 /* 1465 /*
1466 * Invalidate only entries with specified ASID, 1466 * Invalidate only entries with specified ASID,
1467 * makiing sure all entries differ. 1467 * makiing sure all entries differ.
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 25225515451f..77cff1f6d050 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,8 +8,8 @@
8 * Copyright (C) 1998 Ulf Carlsson 8 * Copyright (C) 1998 Ulf Carlsson
9 * Copyright (C) 1999 Silicon Graphics, Inc. 9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
12 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki 11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
13 */ 13 */
14#include <linux/bug.h> 14#include <linux/bug.h>
15#include <linux/compiler.h> 15#include <linux/compiler.h>
@@ -60,9 +60,9 @@ extern void check_wait(void);
60extern asmlinkage void r4k_wait(void); 60extern asmlinkage void r4k_wait(void);
61extern asmlinkage void rollback_handle_int(void); 61extern asmlinkage void rollback_handle_int(void);
62extern asmlinkage void handle_int(void); 62extern asmlinkage void handle_int(void);
63extern asmlinkage void handle_tlbm(void); 63extern u32 handle_tlbl[];
64extern asmlinkage void handle_tlbl(void); 64extern u32 handle_tlbs[];
65extern asmlinkage void handle_tlbs(void); 65extern u32 handle_tlbm[];
66extern asmlinkage void handle_adel(void); 66extern asmlinkage void handle_adel(void);
67extern asmlinkage void handle_ades(void); 67extern asmlinkage void handle_ades(void);
68extern asmlinkage void handle_ibe(void); 68extern asmlinkage void handle_ibe(void);
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
83extern asmlinkage void handle_mcheck(void); 83extern asmlinkage void handle_mcheck(void);
84extern asmlinkage void handle_reserved(void); 84extern asmlinkage void handle_reserved(void);
85 85
86extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
87 struct mips_fpu_struct *ctx, int has_fpu,
88 void *__user *fault_addr);
89
90void (*board_be_init)(void); 86void (*board_be_init)(void);
91int (*board_be_handler)(struct pt_regs *regs, int is_fixup); 87int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
92void (*board_nmi_handler_setup)(void); 88void (*board_nmi_handler_setup)(void);
@@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs)
482#define SYNC 0x0000000f 478#define SYNC 0x0000000f
483#define RDHWR 0x0000003b 479#define RDHWR 0x0000003b
484 480
481/* microMIPS definitions */
482#define MM_POOL32A_FUNC 0xfc00ffff
483#define MM_RDHWR 0x00006b3c
484#define MM_RS 0x001f0000
485#define MM_RT 0x03e00000
486
485/* 487/*
486 * The ll_bit is cleared by r*_switch.S 488 * The ll_bit is cleared by r*_switch.S
487 */ 489 */
@@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
596 * Simulate trapping 'rdhwr' instructions to provide user accessible 598 * Simulate trapping 'rdhwr' instructions to provide user accessible
597 * registers not implemented in hardware. 599 * registers not implemented in hardware.
598 */ 600 */
599static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) 601static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
600{ 602{
601 struct thread_info *ti = task_thread_info(current); 603 struct thread_info *ti = task_thread_info(current);
602 604
605 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
606 1, regs, 0);
607 switch (rd) {
608 case 0: /* CPU number */
609 regs->regs[rt] = smp_processor_id();
610 return 0;
611 case 1: /* SYNCI length */
612 regs->regs[rt] = min(current_cpu_data.dcache.linesz,
613 current_cpu_data.icache.linesz);
614 return 0;
615 case 2: /* Read count register */
616 regs->regs[rt] = read_c0_count();
617 return 0;
618 case 3: /* Count register resolution */
619 switch (current_cpu_data.cputype) {
620 case CPU_20KC:
621 case CPU_25KF:
622 regs->regs[rt] = 1;
623 break;
624 default:
625 regs->regs[rt] = 2;
626 }
627 return 0;
628 case 29:
629 regs->regs[rt] = ti->tp_value;
630 return 0;
631 default:
632 return -1;
633 }
634}
635
636static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
637{
603 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { 638 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
604 int rd = (opcode & RD) >> 11; 639 int rd = (opcode & RD) >> 11;
605 int rt = (opcode & RT) >> 16; 640 int rt = (opcode & RT) >> 16;
606 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 641
607 1, regs, 0); 642 simulate_rdhwr(regs, rd, rt);
608 switch (rd) { 643 return 0;
609 case 0: /* CPU number */ 644 }
610 regs->regs[rt] = smp_processor_id(); 645
611 return 0; 646 /* Not ours. */
612 case 1: /* SYNCI length */ 647 return -1;
613 regs->regs[rt] = min(current_cpu_data.dcache.linesz, 648}
614 current_cpu_data.icache.linesz); 649
615 return 0; 650static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
616 case 2: /* Read count register */ 651{
617 regs->regs[rt] = read_c0_count(); 652 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
618 return 0; 653 int rd = (opcode & MM_RS) >> 16;
619 case 3: /* Count register resolution */ 654 int rt = (opcode & MM_RT) >> 21;
620 switch (current_cpu_data.cputype) { 655 simulate_rdhwr(regs, rd, rt);
621 case CPU_20KC: 656 return 0;
622 case CPU_25KF:
623 regs->regs[rt] = 1;
624 break;
625 default:
626 regs->regs[rt] = 2;
627 }
628 return 0;
629 case 29:
630 regs->regs[rt] = ti->tp_value;
631 return 0;
632 default:
633 return -1;
634 }
635 } 657 }
636 658
637 /* Not ours. */ 659 /* Not ours. */
@@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
662 force_sig_info(SIGFPE, &info, current); 684 force_sig_info(SIGFPE, &info, current);
663} 685}
664 686
665static int process_fpemu_return(int sig, void __user *fault_addr) 687int process_fpemu_return(int sig, void __user *fault_addr)
666{ 688{
667 if (sig == SIGSEGV || sig == SIGBUS) { 689 if (sig == SIGSEGV || sig == SIGBUS) {
668 struct siginfo si = {0}; 690 struct siginfo si = {0};
@@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
813asmlinkage void do_bp(struct pt_regs *regs) 835asmlinkage void do_bp(struct pt_regs *regs)
814{ 836{
815 unsigned int opcode, bcode; 837 unsigned int opcode, bcode;
816 838 unsigned long epc;
817 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 839 u16 instr[2];
818 goto out_sigsegv; 840
841 if (get_isa16_mode(regs->cp0_epc)) {
842 /* Calculate EPC. */
843 epc = exception_epc(regs);
844 if (cpu_has_mmips) {
845 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
846 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
847 goto out_sigsegv;
848 opcode = (instr[0] << 16) | instr[1];
849 } else {
850 /* MIPS16e mode */
851 if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
852 goto out_sigsegv;
853 bcode = (instr[0] >> 6) & 0x3f;
854 do_trap_or_bp(regs, bcode, "Break");
855 return;
856 }
857 } else {
858 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
859 goto out_sigsegv;
860 }
819 861
820 /* 862 /*
821 * There is the ancient bug in the MIPS assemblers that the break 863 * There is the ancient bug in the MIPS assemblers that the break
@@ -856,13 +898,22 @@ out_sigsegv:
856asmlinkage void do_tr(struct pt_regs *regs) 898asmlinkage void do_tr(struct pt_regs *regs)
857{ 899{
858 unsigned int opcode, tcode = 0; 900 unsigned int opcode, tcode = 0;
901 u16 instr[2];
902 unsigned long epc = exception_epc(regs);
859 903
860 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 904 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
861 goto out_sigsegv; 905 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
906 goto out_sigsegv;
907 opcode = (instr[0] << 16) | instr[1];
862 908
863 /* Immediate versions don't provide a code. */ 909 /* Immediate versions don't provide a code. */
864 if (!(opcode & OPCODE)) 910 if (!(opcode & OPCODE)) {
865 tcode = ((opcode >> 6) & ((1 << 10) - 1)); 911 if (get_isa16_mode(regs->cp0_epc))
912 /* microMIPS */
913 tcode = (opcode >> 12) & 0x1f;
914 else
915 tcode = ((opcode >> 6) & ((1 << 10) - 1));
916 }
866 917
867 do_trap_or_bp(regs, tcode, "Trap"); 918 do_trap_or_bp(regs, tcode, "Trap");
868 return; 919 return;
@@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
875{ 926{
876 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); 927 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
877 unsigned long old_epc = regs->cp0_epc; 928 unsigned long old_epc = regs->cp0_epc;
929 unsigned long old31 = regs->regs[31];
878 unsigned int opcode = 0; 930 unsigned int opcode = 0;
879 int status = -1; 931 int status = -1;
880 932
@@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
887 if (unlikely(compute_return_epc(regs) < 0)) 939 if (unlikely(compute_return_epc(regs) < 0))
888 return; 940 return;
889 941
890 if (unlikely(get_user(opcode, epc) < 0)) 942 if (get_isa16_mode(regs->cp0_epc)) {
891 status = SIGSEGV; 943 unsigned short mmop[2] = { 0 };
892 944
893 if (!cpu_has_llsc && status < 0) 945 if (unlikely(get_user(mmop[0], epc) < 0))
894 status = simulate_llsc(regs, opcode); 946 status = SIGSEGV;
947 if (unlikely(get_user(mmop[1], epc) < 0))
948 status = SIGSEGV;
949 opcode = (mmop[0] << 16) | mmop[1];
895 950
896 if (status < 0) 951 if (status < 0)
897 status = simulate_rdhwr(regs, opcode); 952 status = simulate_rdhwr_mm(regs, opcode);
953 } else {
954 if (unlikely(get_user(opcode, epc) < 0))
955 status = SIGSEGV;
898 956
899 if (status < 0) 957 if (!cpu_has_llsc && status < 0)
900 status = simulate_sync(regs, opcode); 958 status = simulate_llsc(regs, opcode);
959
960 if (status < 0)
961 status = simulate_rdhwr_normal(regs, opcode);
962
963 if (status < 0)
964 status = simulate_sync(regs, opcode);
965 }
901 966
902 if (status < 0) 967 if (status < 0)
903 status = SIGILL; 968 status = SIGILL;
904 969
905 if (unlikely(status > 0)) { 970 if (unlikely(status > 0)) {
906 regs->cp0_epc = old_epc; /* Undo skip-over. */ 971 regs->cp0_epc = old_epc; /* Undo skip-over. */
972 regs->regs[31] = old31;
907 force_sig(status, current); 973 force_sig(status, current);
908 } 974 }
909} 975}
@@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
973asmlinkage void do_cpu(struct pt_regs *regs) 1039asmlinkage void do_cpu(struct pt_regs *regs)
974{ 1040{
975 unsigned int __user *epc; 1041 unsigned int __user *epc;
976 unsigned long old_epc; 1042 unsigned long old_epc, old31;
977 unsigned int opcode; 1043 unsigned int opcode;
978 unsigned int cpid; 1044 unsigned int cpid;
979 int status; 1045 int status;
@@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
987 case 0: 1053 case 0:
988 epc = (unsigned int __user *)exception_epc(regs); 1054 epc = (unsigned int __user *)exception_epc(regs);
989 old_epc = regs->cp0_epc; 1055 old_epc = regs->cp0_epc;
1056 old31 = regs->regs[31];
990 opcode = 0; 1057 opcode = 0;
991 status = -1; 1058 status = -1;
992 1059
993 if (unlikely(compute_return_epc(regs) < 0)) 1060 if (unlikely(compute_return_epc(regs) < 0))
994 return; 1061 return;
995 1062
996 if (unlikely(get_user(opcode, epc) < 0)) 1063 if (get_isa16_mode(regs->cp0_epc)) {
997 status = SIGSEGV; 1064 unsigned short mmop[2] = { 0 };
998 1065
999 if (!cpu_has_llsc && status < 0) 1066 if (unlikely(get_user(mmop[0], epc) < 0))
1000 status = simulate_llsc(regs, opcode); 1067 status = SIGSEGV;
1068 if (unlikely(get_user(mmop[1], epc) < 0))
1069 status = SIGSEGV;
1070 opcode = (mmop[0] << 16) | mmop[1];
1001 1071
1002 if (status < 0) 1072 if (status < 0)
1003 status = simulate_rdhwr(regs, opcode); 1073 status = simulate_rdhwr_mm(regs, opcode);
1074 } else {
1075 if (unlikely(get_user(opcode, epc) < 0))
1076 status = SIGSEGV;
1077
1078 if (!cpu_has_llsc && status < 0)
1079 status = simulate_llsc(regs, opcode);
1080
1081 if (status < 0)
1082 status = simulate_rdhwr_normal(regs, opcode);
1083 }
1004 1084
1005 if (status < 0) 1085 if (status < 0)
1006 status = SIGILL; 1086 status = SIGILL;
1007 1087
1008 if (unlikely(status > 0)) { 1088 if (unlikely(status > 0)) {
1009 regs->cp0_epc = old_epc; /* Undo skip-over. */ 1089 regs->cp0_epc = old_epc; /* Undo skip-over. */
1090 regs->regs[31] = old31;
1010 force_sig(status, current); 1091 force_sig(status, current);
1011 } 1092 }
1012 1093
@@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void)
1320void ejtag_exception_handler(struct pt_regs *regs) 1401void ejtag_exception_handler(struct pt_regs *regs)
1321{ 1402{
1322 const int field = 2 * sizeof(unsigned long); 1403 const int field = 2 * sizeof(unsigned long);
1323 unsigned long depc, old_epc; 1404 unsigned long depc, old_epc, old_ra;
1324 unsigned int debug; 1405 unsigned int debug;
1325 1406
1326 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); 1407 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
1335 * calculation. 1416 * calculation.
1336 */ 1417 */
1337 old_epc = regs->cp0_epc; 1418 old_epc = regs->cp0_epc;
1419 old_ra = regs->regs[31];
1338 regs->cp0_epc = depc; 1420 regs->cp0_epc = depc;
1339 __compute_return_epc(regs); 1421 compute_return_epc(regs);
1340 depc = regs->cp0_epc; 1422 depc = regs->cp0_epc;
1341 regs->cp0_epc = old_epc; 1423 regs->cp0_epc = old_epc;
1424 regs->regs[31] = old_ra;
1342 } else 1425 } else
1343 depc += 4; 1426 depc += 4;
1344 write_c0_depc(depc); 1427 write_c0_depc(depc);
@@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64];
1377void __init *set_except_vector(int n, void *addr) 1460void __init *set_except_vector(int n, void *addr)
1378{ 1461{
1379 unsigned long handler = (unsigned long) addr; 1462 unsigned long handler = (unsigned long) addr;
1380 unsigned long old_handler = exception_handlers[n]; 1463 unsigned long old_handler;
1464
1465#ifdef CONFIG_CPU_MICROMIPS
1466 /*
1467 * Only the TLB handlers are cache aligned with an even
1468 * address. All other handlers are on an odd address and
1469 * require no modification. Otherwise, MIPS32 mode will
1470 * be entered when handling any TLB exceptions. That
1471 * would be bad...since we must stay in microMIPS mode.
1472 */
1473 if (!(handler & 0x1))
1474 handler |= 1;
1475#endif
1476 old_handler = xchg(&exception_handlers[n], handler);
1381 1477
1382 exception_handlers[n] = handler;
1383 if (n == 0 && cpu_has_divec) { 1478 if (n == 0 && cpu_has_divec) {
1479#ifdef CONFIG_CPU_MICROMIPS
1480 unsigned long jump_mask = ~((1 << 27) - 1);
1481#else
1384 unsigned long jump_mask = ~((1 << 28) - 1); 1482 unsigned long jump_mask = ~((1 << 28) - 1);
1483#endif
1385 u32 *buf = (u32 *)(ebase + 0x200); 1484 u32 *buf = (u32 *)(ebase + 0x200);
1386 unsigned int k0 = 26; 1485 unsigned int k0 = 26;
1387 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { 1486 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
@@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr)
1397 return (void *)old_handler; 1496 return (void *)old_handler;
1398} 1497}
1399 1498
1400static asmlinkage void do_default_vi(void) 1499static void do_default_vi(void)
1401{ 1500{
1402 show_regs(get_irq_regs()); 1501 show_regs(get_irq_regs());
1403 panic("Caught unexpected vectored interrupt."); 1502 panic("Caught unexpected vectored interrupt.");
@@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1408 unsigned long handler; 1507 unsigned long handler;
1409 unsigned long old_handler = vi_handlers[n]; 1508 unsigned long old_handler = vi_handlers[n];
1410 int srssets = current_cpu_data.srsets; 1509 int srssets = current_cpu_data.srsets;
1411 u32 *w; 1510 u16 *h;
1412 unsigned char *b; 1511 unsigned char *b;
1413 1512
1414 BUG_ON(!cpu_has_veic && !cpu_has_vint); 1513 BUG_ON(!cpu_has_veic && !cpu_has_vint);
1514 BUG_ON((n < 0) && (n > 9));
1415 1515
1416 if (addr == NULL) { 1516 if (addr == NULL) {
1417 handler = (unsigned long) do_default_vi; 1517 handler = (unsigned long) do_default_vi;
1418 srs = 0; 1518 srs = 0;
1419 } else 1519 } else
1420 handler = (unsigned long) addr; 1520 handler = (unsigned long) addr;
1421 vi_handlers[n] = (unsigned long) addr; 1521 vi_handlers[n] = handler;
1422 1522
1423 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); 1523 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1424 1524
@@ -1437,9 +1537,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1437 if (srs == 0) { 1537 if (srs == 0) {
1438 /* 1538 /*
1439 * If no shadow set is selected then use the default handler 1539 * If no shadow set is selected then use the default handler
1440 * that does normal register saving and a standard interrupt exit 1540 * that does normal register saving and standard interrupt exit
1441 */ 1541 */
1442
1443 extern char except_vec_vi, except_vec_vi_lui; 1542 extern char except_vec_vi, except_vec_vi_lui;
1444 extern char except_vec_vi_ori, except_vec_vi_end; 1543 extern char except_vec_vi_ori, except_vec_vi_end;
1445 extern char rollback_except_vec_vi; 1544 extern char rollback_except_vec_vi;
@@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1452 * Status.IM bit to be masked before going there. 1551 * Status.IM bit to be masked before going there.
1453 */ 1552 */
1454 extern char except_vec_vi_mori; 1553 extern char except_vec_vi_mori;
1554#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1555 const int mori_offset = &except_vec_vi_mori - vec_start + 2;
1556#else
1455 const int mori_offset = &except_vec_vi_mori - vec_start; 1557 const int mori_offset = &except_vec_vi_mori - vec_start;
1558#endif
1456#endif /* CONFIG_MIPS_MT_SMTC */ 1559#endif /* CONFIG_MIPS_MT_SMTC */
1457 const int handler_len = &except_vec_vi_end - vec_start; 1560#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1561 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1562 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1563#else
1458 const int lui_offset = &except_vec_vi_lui - vec_start; 1564 const int lui_offset = &except_vec_vi_lui - vec_start;
1459 const int ori_offset = &except_vec_vi_ori - vec_start; 1565 const int ori_offset = &except_vec_vi_ori - vec_start;
1566#endif
1567 const int handler_len = &except_vec_vi_end - vec_start;
1460 1568
1461 if (handler_len > VECTORSPACING) { 1569 if (handler_len > VECTORSPACING) {
1462 /* 1570 /*
@@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1466 panic("VECTORSPACING too small"); 1574 panic("VECTORSPACING too small");
1467 } 1575 }
1468 1576
1469 memcpy(b, vec_start, handler_len); 1577 set_handler(((unsigned long)b - ebase), vec_start,
1578#ifdef CONFIG_CPU_MICROMIPS
1579 (handler_len - 1));
1580#else
1581 handler_len);
1582#endif
1470#ifdef CONFIG_MIPS_MT_SMTC 1583#ifdef CONFIG_MIPS_MT_SMTC
1471 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1584 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1472 1585
1473 w = (u32 *)(b + mori_offset); 1586 h = (u16 *)(b + mori_offset);
1474 *w = (*w & 0xffff0000) | (0x100 << n); 1587 *h = (0x100 << n);
1475#endif /* CONFIG_MIPS_MT_SMTC */ 1588#endif /* CONFIG_MIPS_MT_SMTC */
1476 w = (u32 *)(b + lui_offset); 1589 h = (u16 *)(b + lui_offset);
1477 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); 1590 *h = (handler >> 16) & 0xffff;
1478 w = (u32 *)(b + ori_offset); 1591 h = (u16 *)(b + ori_offset);
1479 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); 1592 *h = (handler & 0xffff);
1480 local_flush_icache_range((unsigned long)b, 1593 local_flush_icache_range((unsigned long)b,
1481 (unsigned long)(b+handler_len)); 1594 (unsigned long)(b+handler_len));
1482 } 1595 }
1483 else { 1596 else {
1484 /* 1597 /*
1485 * In other cases jump directly to the interrupt handler 1598 * In other cases jump directly to the interrupt handler. It
1486 * 1599 * is the handler's responsibility to save registers if required
1487 * It is the handlers responsibility to save registers if required 1600 * (eg hi/lo) and return from the exception using "eret".
1488 * (eg hi/lo) and return from the exception using "eret"
1489 */ 1601 */
1490 w = (u32 *)b; 1602 u32 insn;
1491 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ 1603
1492 *w = 0; 1604 h = (u16 *)b;
1605 /* j handler */
1606#ifdef CONFIG_CPU_MICROMIPS
1607 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1608#else
1609 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1610#endif
1611 h[0] = (insn >> 16) & 0xffff;
1612 h[1] = insn & 0xffff;
1613 h[2] = 0;
1614 h[3] = 0;
1493 local_flush_icache_range((unsigned long)b, 1615 local_flush_icache_range((unsigned long)b,
1494 (unsigned long)(b+8)); 1616 (unsigned long)(b+8));
1495 } 1617 }
@@ -1534,6 +1656,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1534 unsigned int cpu = smp_processor_id(); 1656 unsigned int cpu = smp_processor_id();
1535 unsigned int status_set = ST0_CU0; 1657 unsigned int status_set = ST0_CU0;
1536 unsigned int hwrena = cpu_hwrena_impl_bits; 1658 unsigned int hwrena = cpu_hwrena_impl_bits;
1659 unsigned long asid = 0;
1537#ifdef CONFIG_MIPS_MT_SMTC 1660#ifdef CONFIG_MIPS_MT_SMTC
1538 int secondaryTC = 0; 1661 int secondaryTC = 0;
1539 int bootTC = (cpu == 0); 1662 int bootTC = (cpu == 0);
@@ -1617,8 +1740,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1617 } 1740 }
1618#endif /* CONFIG_MIPS_MT_SMTC */ 1741#endif /* CONFIG_MIPS_MT_SMTC */
1619 1742
1620 if (!cpu_data[cpu].asid_cache) 1743 asid = ASID_FIRST_VERSION;
1621 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1744 cpu_data[cpu].asid_cache = asid;
1745 TLBMISS_HANDLER_SETUP();
1622 1746
1623 atomic_inc(&init_mm.mm_count); 1747 atomic_inc(&init_mm.mm_count);
1624 current->active_mm = &init_mm; 1748 current->active_mm = &init_mm;
@@ -1648,7 +1772,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1648/* Install CPU exception handler */ 1772/* Install CPU exception handler */
1649void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) 1773void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
1650{ 1774{
1775#ifdef CONFIG_CPU_MICROMIPS
1776 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
1777#else
1651 memcpy((void *)(ebase + offset), addr, size); 1778 memcpy((void *)(ebase + offset), addr, size);
1779#endif
1652 local_flush_icache_range(ebase + offset, ebase + offset + size); 1780 local_flush_icache_range(ebase + offset, ebase + offset + size);
1653} 1781}
1654 1782
@@ -1682,8 +1810,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
1682 1810
1683void __init trap_init(void) 1811void __init trap_init(void)
1684{ 1812{
1685 extern char except_vec3_generic, except_vec3_r4000; 1813 extern char except_vec3_generic;
1686 extern char except_vec4; 1814 extern char except_vec4;
1815 extern char except_vec3_r4000;
1687 unsigned long i; 1816 unsigned long i;
1688 int rollback; 1817 int rollback;
1689 1818
@@ -1700,7 +1829,12 @@ void __init trap_init(void)
1700 ebase = (unsigned long) 1829 ebase = (unsigned long)
1701 __alloc_bootmem(size, 1 << fls(size), 0); 1830 __alloc_bootmem(size, 1 << fls(size), 0);
1702 } else { 1831 } else {
1703 ebase = CKSEG0; 1832#ifdef CONFIG_KVM_GUEST
1833#define KVM_GUEST_KSEG0 0x40000000
1834 ebase = KVM_GUEST_KSEG0;
1835#else
1836 ebase = CKSEG0;
1837#endif
1704 if (cpu_has_mips_r2) 1838 if (cpu_has_mips_r2)
1705 ebase += (read_c0_ebase() & 0x3ffff000); 1839 ebase += (read_c0_ebase() & 0x3ffff000);
1706 } 1840 }
@@ -1816,11 +1950,11 @@ void __init trap_init(void)
1816 1950
1817 if (cpu_has_vce) 1951 if (cpu_has_vce)
1818 /* Special exception: R4[04]00 uses also the divec space. */ 1952 /* Special exception: R4[04]00 uses also the divec space. */
1819 memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); 1953 set_handler(0x180, &except_vec3_r4000, 0x100);
1820 else if (cpu_has_4kex) 1954 else if (cpu_has_4kex)
1821 memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); 1955 set_handler(0x180, &except_vec3_generic, 0x80);
1822 else 1956 else
1823 memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); 1957 set_handler(0x080, &except_vec3_generic, 0x80);
1824 1958
1825 local_flush_icache_range(ebase, ebase + 0x400); 1959 local_flush_icache_range(ebase, ebase + 0x400);
1826 flush_tlb_handlers(); 1960 flush_tlb_handlers();
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 6087a54c86a0..203d8857070d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -83,8 +83,12 @@
83#include <asm/branch.h> 83#include <asm/branch.h>
84#include <asm/byteorder.h> 84#include <asm/byteorder.h>
85#include <asm/cop2.h> 85#include <asm/cop2.h>
86#include <asm/fpu.h>
87#include <asm/fpu_emulator.h>
86#include <asm/inst.h> 88#include <asm/inst.h>
87#include <asm/uaccess.h> 89#include <asm/uaccess.h>
90#include <asm/fpu.h>
91#include <asm/fpu_emulator.h>
88 92
89#define STR(x) __STR(x) 93#define STR(x) __STR(x)
90#define __STR(x) #x 94#define __STR(x) #x
@@ -102,12 +106,332 @@ static u32 unaligned_action;
102#endif 106#endif
103extern void show_registers(struct pt_regs *regs); 107extern void show_registers(struct pt_regs *regs);
104 108
109#ifdef __BIG_ENDIAN
110#define LoadHW(addr, value, res) \
111 __asm__ __volatile__ (".set\tnoat\n" \
112 "1:\tlb\t%0, 0(%2)\n" \
113 "2:\tlbu\t$1, 1(%2)\n\t" \
114 "sll\t%0, 0x8\n\t" \
115 "or\t%0, $1\n\t" \
116 "li\t%1, 0\n" \
117 "3:\t.set\tat\n\t" \
118 ".insn\n\t" \
119 ".section\t.fixup,\"ax\"\n\t" \
120 "4:\tli\t%1, %3\n\t" \
121 "j\t3b\n\t" \
122 ".previous\n\t" \
123 ".section\t__ex_table,\"a\"\n\t" \
124 STR(PTR)"\t1b, 4b\n\t" \
125 STR(PTR)"\t2b, 4b\n\t" \
126 ".previous" \
127 : "=&r" (value), "=r" (res) \
128 : "r" (addr), "i" (-EFAULT));
129
130#define LoadW(addr, value, res) \
131 __asm__ __volatile__ ( \
132 "1:\tlwl\t%0, (%2)\n" \
133 "2:\tlwr\t%0, 3(%2)\n\t" \
134 "li\t%1, 0\n" \
135 "3:\n\t" \
136 ".insn\n\t" \
137 ".section\t.fixup,\"ax\"\n\t" \
138 "4:\tli\t%1, %3\n\t" \
139 "j\t3b\n\t" \
140 ".previous\n\t" \
141 ".section\t__ex_table,\"a\"\n\t" \
142 STR(PTR)"\t1b, 4b\n\t" \
143 STR(PTR)"\t2b, 4b\n\t" \
144 ".previous" \
145 : "=&r" (value), "=r" (res) \
146 : "r" (addr), "i" (-EFAULT));
147
148#define LoadHWU(addr, value, res) \
149 __asm__ __volatile__ ( \
150 ".set\tnoat\n" \
151 "1:\tlbu\t%0, 0(%2)\n" \
152 "2:\tlbu\t$1, 1(%2)\n\t" \
153 "sll\t%0, 0x8\n\t" \
154 "or\t%0, $1\n\t" \
155 "li\t%1, 0\n" \
156 "3:\n\t" \
157 ".insn\n\t" \
158 ".set\tat\n\t" \
159 ".section\t.fixup,\"ax\"\n\t" \
160 "4:\tli\t%1, %3\n\t" \
161 "j\t3b\n\t" \
162 ".previous\n\t" \
163 ".section\t__ex_table,\"a\"\n\t" \
164 STR(PTR)"\t1b, 4b\n\t" \
165 STR(PTR)"\t2b, 4b\n\t" \
166 ".previous" \
167 : "=&r" (value), "=r" (res) \
168 : "r" (addr), "i" (-EFAULT));
169
170#define LoadWU(addr, value, res) \
171 __asm__ __volatile__ ( \
172 "1:\tlwl\t%0, (%2)\n" \
173 "2:\tlwr\t%0, 3(%2)\n\t" \
174 "dsll\t%0, %0, 32\n\t" \
175 "dsrl\t%0, %0, 32\n\t" \
176 "li\t%1, 0\n" \
177 "3:\n\t" \
178 ".insn\n\t" \
179 "\t.section\t.fixup,\"ax\"\n\t" \
180 "4:\tli\t%1, %3\n\t" \
181 "j\t3b\n\t" \
182 ".previous\n\t" \
183 ".section\t__ex_table,\"a\"\n\t" \
184 STR(PTR)"\t1b, 4b\n\t" \
185 STR(PTR)"\t2b, 4b\n\t" \
186 ".previous" \
187 : "=&r" (value), "=r" (res) \
188 : "r" (addr), "i" (-EFAULT));
189
190#define LoadDW(addr, value, res) \
191 __asm__ __volatile__ ( \
192 "1:\tldl\t%0, (%2)\n" \
193 "2:\tldr\t%0, 7(%2)\n\t" \
194 "li\t%1, 0\n" \
195 "3:\n\t" \
196 ".insn\n\t" \
197 "\t.section\t.fixup,\"ax\"\n\t" \
198 "4:\tli\t%1, %3\n\t" \
199 "j\t3b\n\t" \
200 ".previous\n\t" \
201 ".section\t__ex_table,\"a\"\n\t" \
202 STR(PTR)"\t1b, 4b\n\t" \
203 STR(PTR)"\t2b, 4b\n\t" \
204 ".previous" \
205 : "=&r" (value), "=r" (res) \
206 : "r" (addr), "i" (-EFAULT));
207
208#define StoreHW(addr, value, res) \
209 __asm__ __volatile__ ( \
210 ".set\tnoat\n" \
211 "1:\tsb\t%1, 1(%2)\n\t" \
212 "srl\t$1, %1, 0x8\n" \
213 "2:\tsb\t$1, 0(%2)\n\t" \
214 ".set\tat\n\t" \
215 "li\t%0, 0\n" \
216 "3:\n\t" \
217 ".insn\n\t" \
218 ".section\t.fixup,\"ax\"\n\t" \
219 "4:\tli\t%0, %3\n\t" \
220 "j\t3b\n\t" \
221 ".previous\n\t" \
222 ".section\t__ex_table,\"a\"\n\t" \
223 STR(PTR)"\t1b, 4b\n\t" \
224 STR(PTR)"\t2b, 4b\n\t" \
225 ".previous" \
226 : "=r" (res) \
227 : "r" (value), "r" (addr), "i" (-EFAULT));
228
229#define StoreW(addr, value, res) \
230 __asm__ __volatile__ ( \
231 "1:\tswl\t%1,(%2)\n" \
232 "2:\tswr\t%1, 3(%2)\n\t" \
233 "li\t%0, 0\n" \
234 "3:\n\t" \
235 ".insn\n\t" \
236 ".section\t.fixup,\"ax\"\n\t" \
237 "4:\tli\t%0, %3\n\t" \
238 "j\t3b\n\t" \
239 ".previous\n\t" \
240 ".section\t__ex_table,\"a\"\n\t" \
241 STR(PTR)"\t1b, 4b\n\t" \
242 STR(PTR)"\t2b, 4b\n\t" \
243 ".previous" \
244 : "=r" (res) \
245 : "r" (value), "r" (addr), "i" (-EFAULT));
246
247#define StoreDW(addr, value, res) \
248 __asm__ __volatile__ ( \
249 "1:\tsdl\t%1,(%2)\n" \
250 "2:\tsdr\t%1, 7(%2)\n\t" \
251 "li\t%0, 0\n" \
252 "3:\n\t" \
253 ".insn\n\t" \
254 ".section\t.fixup,\"ax\"\n\t" \
255 "4:\tli\t%0, %3\n\t" \
256 "j\t3b\n\t" \
257 ".previous\n\t" \
258 ".section\t__ex_table,\"a\"\n\t" \
259 STR(PTR)"\t1b, 4b\n\t" \
260 STR(PTR)"\t2b, 4b\n\t" \
261 ".previous" \
262 : "=r" (res) \
263 : "r" (value), "r" (addr), "i" (-EFAULT));
264#endif
265
266#ifdef __LITTLE_ENDIAN
267#define LoadHW(addr, value, res) \
268 __asm__ __volatile__ (".set\tnoat\n" \
269 "1:\tlb\t%0, 1(%2)\n" \
270 "2:\tlbu\t$1, 0(%2)\n\t" \
271 "sll\t%0, 0x8\n\t" \
272 "or\t%0, $1\n\t" \
273 "li\t%1, 0\n" \
274 "3:\t.set\tat\n\t" \
275 ".insn\n\t" \
276 ".section\t.fixup,\"ax\"\n\t" \
277 "4:\tli\t%1, %3\n\t" \
278 "j\t3b\n\t" \
279 ".previous\n\t" \
280 ".section\t__ex_table,\"a\"\n\t" \
281 STR(PTR)"\t1b, 4b\n\t" \
282 STR(PTR)"\t2b, 4b\n\t" \
283 ".previous" \
284 : "=&r" (value), "=r" (res) \
285 : "r" (addr), "i" (-EFAULT));
286
287#define LoadW(addr, value, res) \
288 __asm__ __volatile__ ( \
289 "1:\tlwl\t%0, 3(%2)\n" \
290 "2:\tlwr\t%0, (%2)\n\t" \
291 "li\t%1, 0\n" \
292 "3:\n\t" \
293 ".insn\n\t" \
294 ".section\t.fixup,\"ax\"\n\t" \
295 "4:\tli\t%1, %3\n\t" \
296 "j\t3b\n\t" \
297 ".previous\n\t" \
298 ".section\t__ex_table,\"a\"\n\t" \
299 STR(PTR)"\t1b, 4b\n\t" \
300 STR(PTR)"\t2b, 4b\n\t" \
301 ".previous" \
302 : "=&r" (value), "=r" (res) \
303 : "r" (addr), "i" (-EFAULT));
304
305#define LoadHWU(addr, value, res) \
306 __asm__ __volatile__ ( \
307 ".set\tnoat\n" \
308 "1:\tlbu\t%0, 1(%2)\n" \
309 "2:\tlbu\t$1, 0(%2)\n\t" \
310 "sll\t%0, 0x8\n\t" \
311 "or\t%0, $1\n\t" \
312 "li\t%1, 0\n" \
313 "3:\n\t" \
314 ".insn\n\t" \
315 ".set\tat\n\t" \
316 ".section\t.fixup,\"ax\"\n\t" \
317 "4:\tli\t%1, %3\n\t" \
318 "j\t3b\n\t" \
319 ".previous\n\t" \
320 ".section\t__ex_table,\"a\"\n\t" \
321 STR(PTR)"\t1b, 4b\n\t" \
322 STR(PTR)"\t2b, 4b\n\t" \
323 ".previous" \
324 : "=&r" (value), "=r" (res) \
325 : "r" (addr), "i" (-EFAULT));
326
327#define LoadWU(addr, value, res) \
328 __asm__ __volatile__ ( \
329 "1:\tlwl\t%0, 3(%2)\n" \
330 "2:\tlwr\t%0, (%2)\n\t" \
331 "dsll\t%0, %0, 32\n\t" \
332 "dsrl\t%0, %0, 32\n\t" \
333 "li\t%1, 0\n" \
334 "3:\n\t" \
335 ".insn\n\t" \
336 "\t.section\t.fixup,\"ax\"\n\t" \
337 "4:\tli\t%1, %3\n\t" \
338 "j\t3b\n\t" \
339 ".previous\n\t" \
340 ".section\t__ex_table,\"a\"\n\t" \
341 STR(PTR)"\t1b, 4b\n\t" \
342 STR(PTR)"\t2b, 4b\n\t" \
343 ".previous" \
344 : "=&r" (value), "=r" (res) \
345 : "r" (addr), "i" (-EFAULT));
346
347#define LoadDW(addr, value, res) \
348 __asm__ __volatile__ ( \
349 "1:\tldl\t%0, 7(%2)\n" \
350 "2:\tldr\t%0, (%2)\n\t" \
351 "li\t%1, 0\n" \
352 "3:\n\t" \
353 ".insn\n\t" \
354 "\t.section\t.fixup,\"ax\"\n\t" \
355 "4:\tli\t%1, %3\n\t" \
356 "j\t3b\n\t" \
357 ".previous\n\t" \
358 ".section\t__ex_table,\"a\"\n\t" \
359 STR(PTR)"\t1b, 4b\n\t" \
360 STR(PTR)"\t2b, 4b\n\t" \
361 ".previous" \
362 : "=&r" (value), "=r" (res) \
363 : "r" (addr), "i" (-EFAULT));
364
365#define StoreHW(addr, value, res) \
366 __asm__ __volatile__ ( \
367 ".set\tnoat\n" \
368 "1:\tsb\t%1, 0(%2)\n\t" \
369 "srl\t$1,%1, 0x8\n" \
370 "2:\tsb\t$1, 1(%2)\n\t" \
371 ".set\tat\n\t" \
372 "li\t%0, 0\n" \
373 "3:\n\t" \
374 ".insn\n\t" \
375 ".section\t.fixup,\"ax\"\n\t" \
376 "4:\tli\t%0, %3\n\t" \
377 "j\t3b\n\t" \
378 ".previous\n\t" \
379 ".section\t__ex_table,\"a\"\n\t" \
380 STR(PTR)"\t1b, 4b\n\t" \
381 STR(PTR)"\t2b, 4b\n\t" \
382 ".previous" \
383 : "=r" (res) \
384 : "r" (value), "r" (addr), "i" (-EFAULT));
385
386#define StoreW(addr, value, res) \
387 __asm__ __volatile__ ( \
388 "1:\tswl\t%1, 3(%2)\n" \
389 "2:\tswr\t%1, (%2)\n\t" \
390 "li\t%0, 0\n" \
391 "3:\n\t" \
392 ".insn\n\t" \
393 ".section\t.fixup,\"ax\"\n\t" \
394 "4:\tli\t%0, %3\n\t" \
395 "j\t3b\n\t" \
396 ".previous\n\t" \
397 ".section\t__ex_table,\"a\"\n\t" \
398 STR(PTR)"\t1b, 4b\n\t" \
399 STR(PTR)"\t2b, 4b\n\t" \
400 ".previous" \
401 : "=r" (res) \
402 : "r" (value), "r" (addr), "i" (-EFAULT));
403
404#define StoreDW(addr, value, res) \
405 __asm__ __volatile__ ( \
406 "1:\tsdl\t%1, 7(%2)\n" \
407 "2:\tsdr\t%1, (%2)\n\t" \
408 "li\t%0, 0\n" \
409 "3:\n\t" \
410 ".insn\n\t" \
411 ".section\t.fixup,\"ax\"\n\t" \
412 "4:\tli\t%0, %3\n\t" \
413 "j\t3b\n\t" \
414 ".previous\n\t" \
415 ".section\t__ex_table,\"a\"\n\t" \
416 STR(PTR)"\t1b, 4b\n\t" \
417 STR(PTR)"\t2b, 4b\n\t" \
418 ".previous" \
419 : "=r" (res) \
420 : "r" (value), "r" (addr), "i" (-EFAULT));
421#endif
422
105static void emulate_load_store_insn(struct pt_regs *regs, 423static void emulate_load_store_insn(struct pt_regs *regs,
106 void __user *addr, unsigned int __user *pc) 424 void __user *addr, unsigned int __user *pc)
107{ 425{
108 union mips_instruction insn; 426 union mips_instruction insn;
109 unsigned long value; 427 unsigned long value;
110 unsigned int res; 428 unsigned int res;
429 unsigned long origpc;
430 unsigned long orig31;
431 void __user *fault_addr = NULL;
432
433 origpc = (unsigned long)pc;
434 orig31 = regs->regs[31];
111 435
112 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); 436 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
113 437
@@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
117 __get_user(insn.word, pc); 441 __get_user(insn.word, pc);
118 442
119 switch (insn.i_format.opcode) { 443 switch (insn.i_format.opcode) {
120 /* 444 /*
121 * These are instructions that a compiler doesn't generate. We 445 * These are instructions that a compiler doesn't generate. We
122 * can assume therefore that the code is MIPS-aware and 446 * can assume therefore that the code is MIPS-aware and
123 * really buggy. Emulating these instructions would break the 447 * really buggy. Emulating these instructions would break the
124 * semantics anyway. 448 * semantics anyway.
125 */ 449 */
126 case ll_op: 450 case ll_op:
127 case lld_op: 451 case lld_op:
128 case sc_op: 452 case sc_op:
129 case scd_op: 453 case scd_op:
130 454
131 /* 455 /*
132 * For these instructions the only way to create an address 456 * For these instructions the only way to create an address
133 * error is an attempted access to kernel/supervisor address 457 * error is an attempted access to kernel/supervisor address
134 * space. 458 * space.
135 */ 459 */
136 case ldl_op: 460 case ldl_op:
137 case ldr_op: 461 case ldr_op:
138 case lwl_op: 462 case lwl_op:
@@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
146 case sb_op: 470 case sb_op:
147 goto sigbus; 471 goto sigbus;
148 472
149 /* 473 /*
150 * The remaining opcodes are the ones that are really of interest. 474 * The remaining opcodes are the ones that are really of
151 */ 475 * interest.
476 */
152 case lh_op: 477 case lh_op:
153 if (!access_ok(VERIFY_READ, addr, 2)) 478 if (!access_ok(VERIFY_READ, addr, 2))
154 goto sigbus; 479 goto sigbus;
155 480
156 __asm__ __volatile__ (".set\tnoat\n" 481 LoadHW(addr, value, res);
157#ifdef __BIG_ENDIAN
158 "1:\tlb\t%0, 0(%2)\n"
159 "2:\tlbu\t$1, 1(%2)\n\t"
160#endif
161#ifdef __LITTLE_ENDIAN
162 "1:\tlb\t%0, 1(%2)\n"
163 "2:\tlbu\t$1, 0(%2)\n\t"
164#endif
165 "sll\t%0, 0x8\n\t"
166 "or\t%0, $1\n\t"
167 "li\t%1, 0\n"
168 "3:\t.set\tat\n\t"
169 ".section\t.fixup,\"ax\"\n\t"
170 "4:\tli\t%1, %3\n\t"
171 "j\t3b\n\t"
172 ".previous\n\t"
173 ".section\t__ex_table,\"a\"\n\t"
174 STR(PTR)"\t1b, 4b\n\t"
175 STR(PTR)"\t2b, 4b\n\t"
176 ".previous"
177 : "=&r" (value), "=r" (res)
178 : "r" (addr), "i" (-EFAULT));
179 if (res) 482 if (res)
180 goto fault; 483 goto fault;
181 compute_return_epc(regs); 484 compute_return_epc(regs);
@@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
186 if (!access_ok(VERIFY_READ, addr, 4)) 489 if (!access_ok(VERIFY_READ, addr, 4))
187 goto sigbus; 490 goto sigbus;
188 491
189 __asm__ __volatile__ ( 492 LoadW(addr, value, res);
190#ifdef __BIG_ENDIAN
191 "1:\tlwl\t%0, (%2)\n"
192 "2:\tlwr\t%0, 3(%2)\n\t"
193#endif
194#ifdef __LITTLE_ENDIAN
195 "1:\tlwl\t%0, 3(%2)\n"
196 "2:\tlwr\t%0, (%2)\n\t"
197#endif
198 "li\t%1, 0\n"
199 "3:\t.section\t.fixup,\"ax\"\n\t"
200 "4:\tli\t%1, %3\n\t"
201 "j\t3b\n\t"
202 ".previous\n\t"
203 ".section\t__ex_table,\"a\"\n\t"
204 STR(PTR)"\t1b, 4b\n\t"
205 STR(PTR)"\t2b, 4b\n\t"
206 ".previous"
207 : "=&r" (value), "=r" (res)
208 : "r" (addr), "i" (-EFAULT));
209 if (res) 493 if (res)
210 goto fault; 494 goto fault;
211 compute_return_epc(regs); 495 compute_return_epc(regs);
@@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
216 if (!access_ok(VERIFY_READ, addr, 2)) 500 if (!access_ok(VERIFY_READ, addr, 2))
217 goto sigbus; 501 goto sigbus;
218 502
219 __asm__ __volatile__ ( 503 LoadHWU(addr, value, res);
220 ".set\tnoat\n"
221#ifdef __BIG_ENDIAN
222 "1:\tlbu\t%0, 0(%2)\n"
223 "2:\tlbu\t$1, 1(%2)\n\t"
224#endif
225#ifdef __LITTLE_ENDIAN
226 "1:\tlbu\t%0, 1(%2)\n"
227 "2:\tlbu\t$1, 0(%2)\n\t"
228#endif
229 "sll\t%0, 0x8\n\t"
230 "or\t%0, $1\n\t"
231 "li\t%1, 0\n"
232 "3:\t.set\tat\n\t"
233 ".section\t.fixup,\"ax\"\n\t"
234 "4:\tli\t%1, %3\n\t"
235 "j\t3b\n\t"
236 ".previous\n\t"
237 ".section\t__ex_table,\"a\"\n\t"
238 STR(PTR)"\t1b, 4b\n\t"
239 STR(PTR)"\t2b, 4b\n\t"
240 ".previous"
241 : "=&r" (value), "=r" (res)
242 : "r" (addr), "i" (-EFAULT));
243 if (res) 504 if (res)
244 goto fault; 505 goto fault;
245 compute_return_epc(regs); 506 compute_return_epc(regs);
@@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
258 if (!access_ok(VERIFY_READ, addr, 4)) 519 if (!access_ok(VERIFY_READ, addr, 4))
259 goto sigbus; 520 goto sigbus;
260 521
261 __asm__ __volatile__ ( 522 LoadWU(addr, value, res);
262#ifdef __BIG_ENDIAN
263 "1:\tlwl\t%0, (%2)\n"
264 "2:\tlwr\t%0, 3(%2)\n\t"
265#endif
266#ifdef __LITTLE_ENDIAN
267 "1:\tlwl\t%0, 3(%2)\n"
268 "2:\tlwr\t%0, (%2)\n\t"
269#endif
270 "dsll\t%0, %0, 32\n\t"
271 "dsrl\t%0, %0, 32\n\t"
272 "li\t%1, 0\n"
273 "3:\t.section\t.fixup,\"ax\"\n\t"
274 "4:\tli\t%1, %3\n\t"
275 "j\t3b\n\t"
276 ".previous\n\t"
277 ".section\t__ex_table,\"a\"\n\t"
278 STR(PTR)"\t1b, 4b\n\t"
279 STR(PTR)"\t2b, 4b\n\t"
280 ".previous"
281 : "=&r" (value), "=r" (res)
282 : "r" (addr), "i" (-EFAULT));
283 if (res) 523 if (res)
284 goto fault; 524 goto fault;
285 compute_return_epc(regs); 525 compute_return_epc(regs);
@@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
302 if (!access_ok(VERIFY_READ, addr, 8)) 542 if (!access_ok(VERIFY_READ, addr, 8))
303 goto sigbus; 543 goto sigbus;
304 544
305 __asm__ __volatile__ ( 545 LoadDW(addr, value, res);
306#ifdef __BIG_ENDIAN
307 "1:\tldl\t%0, (%2)\n"
308 "2:\tldr\t%0, 7(%2)\n\t"
309#endif
310#ifdef __LITTLE_ENDIAN
311 "1:\tldl\t%0, 7(%2)\n"
312 "2:\tldr\t%0, (%2)\n\t"
313#endif
314 "li\t%1, 0\n"
315 "3:\t.section\t.fixup,\"ax\"\n\t"
316 "4:\tli\t%1, %3\n\t"
317 "j\t3b\n\t"
318 ".previous\n\t"
319 ".section\t__ex_table,\"a\"\n\t"
320 STR(PTR)"\t1b, 4b\n\t"
321 STR(PTR)"\t2b, 4b\n\t"
322 ".previous"
323 : "=&r" (value), "=r" (res)
324 : "r" (addr), "i" (-EFAULT));
325 if (res) 546 if (res)
326 goto fault; 547 goto fault;
327 compute_return_epc(regs); 548 compute_return_epc(regs);
@@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
336 if (!access_ok(VERIFY_WRITE, addr, 2)) 557 if (!access_ok(VERIFY_WRITE, addr, 2))
337 goto sigbus; 558 goto sigbus;
338 559
560 compute_return_epc(regs);
339 value = regs->regs[insn.i_format.rt]; 561 value = regs->regs[insn.i_format.rt];
340 __asm__ __volatile__ ( 562 StoreHW(addr, value, res);
341#ifdef __BIG_ENDIAN
342 ".set\tnoat\n"
343 "1:\tsb\t%1, 1(%2)\n\t"
344 "srl\t$1, %1, 0x8\n"
345 "2:\tsb\t$1, 0(%2)\n\t"
346 ".set\tat\n\t"
347#endif
348#ifdef __LITTLE_ENDIAN
349 ".set\tnoat\n"
350 "1:\tsb\t%1, 0(%2)\n\t"
351 "srl\t$1,%1, 0x8\n"
352 "2:\tsb\t$1, 1(%2)\n\t"
353 ".set\tat\n\t"
354#endif
355 "li\t%0, 0\n"
356 "3:\n\t"
357 ".section\t.fixup,\"ax\"\n\t"
358 "4:\tli\t%0, %3\n\t"
359 "j\t3b\n\t"
360 ".previous\n\t"
361 ".section\t__ex_table,\"a\"\n\t"
362 STR(PTR)"\t1b, 4b\n\t"
363 STR(PTR)"\t2b, 4b\n\t"
364 ".previous"
365 : "=r" (res)
366 : "r" (value), "r" (addr), "i" (-EFAULT));
367 if (res) 563 if (res)
368 goto fault; 564 goto fault;
369 compute_return_epc(regs);
370 break; 565 break;
371 566
372 case sw_op: 567 case sw_op:
373 if (!access_ok(VERIFY_WRITE, addr, 4)) 568 if (!access_ok(VERIFY_WRITE, addr, 4))
374 goto sigbus; 569 goto sigbus;
375 570
571 compute_return_epc(regs);
376 value = regs->regs[insn.i_format.rt]; 572 value = regs->regs[insn.i_format.rt];
377 __asm__ __volatile__ ( 573 StoreW(addr, value, res);
378#ifdef __BIG_ENDIAN
379 "1:\tswl\t%1,(%2)\n"
380 "2:\tswr\t%1, 3(%2)\n\t"
381#endif
382#ifdef __LITTLE_ENDIAN
383 "1:\tswl\t%1, 3(%2)\n"
384 "2:\tswr\t%1, (%2)\n\t"
385#endif
386 "li\t%0, 0\n"
387 "3:\n\t"
388 ".section\t.fixup,\"ax\"\n\t"
389 "4:\tli\t%0, %3\n\t"
390 "j\t3b\n\t"
391 ".previous\n\t"
392 ".section\t__ex_table,\"a\"\n\t"
393 STR(PTR)"\t1b, 4b\n\t"
394 STR(PTR)"\t2b, 4b\n\t"
395 ".previous"
396 : "=r" (res)
397 : "r" (value), "r" (addr), "i" (-EFAULT));
398 if (res) 574 if (res)
399 goto fault; 575 goto fault;
400 compute_return_epc(regs);
401 break; 576 break;
402 577
403 case sd_op: 578 case sd_op:
@@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs,
412 if (!access_ok(VERIFY_WRITE, addr, 8)) 587 if (!access_ok(VERIFY_WRITE, addr, 8))
413 goto sigbus; 588 goto sigbus;
414 589
590 compute_return_epc(regs);
415 value = regs->regs[insn.i_format.rt]; 591 value = regs->regs[insn.i_format.rt];
416 __asm__ __volatile__ ( 592 StoreDW(addr, value, res);
417#ifdef __BIG_ENDIAN
418 "1:\tsdl\t%1,(%2)\n"
419 "2:\tsdr\t%1, 7(%2)\n\t"
420#endif
421#ifdef __LITTLE_ENDIAN
422 "1:\tsdl\t%1, 7(%2)\n"
423 "2:\tsdr\t%1, (%2)\n\t"
424#endif
425 "li\t%0, 0\n"
426 "3:\n\t"
427 ".section\t.fixup,\"ax\"\n\t"
428 "4:\tli\t%0, %3\n\t"
429 "j\t3b\n\t"
430 ".previous\n\t"
431 ".section\t__ex_table,\"a\"\n\t"
432 STR(PTR)"\t1b, 4b\n\t"
433 STR(PTR)"\t2b, 4b\n\t"
434 ".previous"
435 : "=r" (res)
436 : "r" (value), "r" (addr), "i" (-EFAULT));
437 if (res) 593 if (res)
438 goto fault; 594 goto fault;
439 compute_return_epc(regs);
440 break; 595 break;
441#endif /* CONFIG_64BIT */ 596#endif /* CONFIG_64BIT */
442 597
@@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs,
447 case ldc1_op: 602 case ldc1_op:
448 case swc1_op: 603 case swc1_op:
449 case sdc1_op: 604 case sdc1_op:
450 /* 605 die_if_kernel("Unaligned FP access in kernel code", regs);
451 * I herewith declare: this does not happen. So send SIGBUS. 606 BUG_ON(!used_math());
452 */ 607 BUG_ON(!is_fpu_owner());
453 goto sigbus; 608
609 lose_fpu(1); /* Save FPU state for the emulator. */
610 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
611 &fault_addr);
612 own_fpu(1); /* Restore FPU state. */
613
614 /* Signal if something went wrong. */
615 process_fpemu_return(res, fault_addr);
616
617 if (res == 0)
618 break;
619 return;
454 620
455 /* 621 /*
456 * COP2 is available to implementor for application specific use. 622 * COP2 is available to implementor for application specific use.
@@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
488 return; 654 return;
489 655
490fault: 656fault:
657 /* roll back jump/branch */
658 regs->cp0_epc = origpc;
659 regs->regs[31] = orig31;
491 /* Did we have an exception handler installed? */ 660 /* Did we have an exception handler installed? */
492 if (fixup_exception(regs)) 661 if (fixup_exception(regs))
493 return; 662 return;
@@ -504,10 +673,881 @@ sigbus:
504 return; 673 return;
505 674
506sigill: 675sigill:
507 die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); 676 die_if_kernel
677 ("Unhandled kernel unaligned access or invalid instruction", regs);
508 force_sig(SIGILL, current); 678 force_sig(SIGILL, current);
509} 679}
510 680
/*
 * Recode table from 16-bit register notation to 32-bit GPR.
 * Index is the 3-bit register field of a MIPS16e/microMIPS16 encoding;
 * value is the architectural GPR number ($16, $17, $2..$7).
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/*
 * Recode table from 16-bit STORE register notation to 32-bit GPR.
 * Same as reg16to32 except encoding 0 selects $0 (zero register) so a
 * store of that encoding writes the constant zero.
 */
const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
686
687void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
688{
689 unsigned long value;
690 unsigned int res;
691 int i;
692 unsigned int reg = 0, rvar;
693 unsigned long orig31;
694 u16 __user *pc16;
695 u16 halfword;
696 unsigned int word;
697 unsigned long origpc, contpc;
698 union mips_instruction insn;
699 struct mm_decoded_insn mminsn;
700 void __user *fault_addr = NULL;
701
702 origpc = regs->cp0_epc;
703 orig31 = regs->regs[31];
704
705 mminsn.micro_mips_mode = 1;
706
707 /*
708 * This load never faults.
709 */
710 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
711 __get_user(halfword, pc16);
712 pc16++;
713 contpc = regs->cp0_epc + 2;
714 word = ((unsigned int)halfword << 16);
715 mminsn.pc_inc = 2;
716
717 if (!mm_insn_16bit(halfword)) {
718 __get_user(halfword, pc16);
719 pc16++;
720 contpc = regs->cp0_epc + 4;
721 mminsn.pc_inc = 4;
722 word |= halfword;
723 }
724 mminsn.insn = word;
725
726 if (get_user(halfword, pc16))
727 goto fault;
728 mminsn.next_pc_inc = 2;
729 word = ((unsigned int)halfword << 16);
730
731 if (!mm_insn_16bit(halfword)) {
732 pc16++;
733 if (get_user(halfword, pc16))
734 goto fault;
735 mminsn.next_pc_inc = 4;
736 word |= halfword;
737 }
738 mminsn.next_insn = word;
739
740 insn = (union mips_instruction)(mminsn.insn);
741 if (mm_isBranchInstr(regs, mminsn, &contpc))
742 insn = (union mips_instruction)(mminsn.next_insn);
743
744 /* Parse instruction to find what to do */
745
746 switch (insn.mm_i_format.opcode) {
747
748 case mm_pool32a_op:
749 switch (insn.mm_x_format.func) {
750 case mm_lwxs_op:
751 reg = insn.mm_x_format.rd;
752 goto loadW;
753 }
754
755 goto sigbus;
756
757 case mm_pool32b_op:
758 switch (insn.mm_m_format.func) {
759 case mm_lwp_func:
760 reg = insn.mm_m_format.rd;
761 if (reg == 31)
762 goto sigbus;
763
764 if (!access_ok(VERIFY_READ, addr, 8))
765 goto sigbus;
766
767 LoadW(addr, value, res);
768 if (res)
769 goto fault;
770 regs->regs[reg] = value;
771 addr += 4;
772 LoadW(addr, value, res);
773 if (res)
774 goto fault;
775 regs->regs[reg + 1] = value;
776 goto success;
777
778 case mm_swp_func:
779 reg = insn.mm_m_format.rd;
780 if (reg == 31)
781 goto sigbus;
782
783 if (!access_ok(VERIFY_WRITE, addr, 8))
784 goto sigbus;
785
786 value = regs->regs[reg];
787 StoreW(addr, value, res);
788 if (res)
789 goto fault;
790 addr += 4;
791 value = regs->regs[reg + 1];
792 StoreW(addr, value, res);
793 if (res)
794 goto fault;
795 goto success;
796
797 case mm_ldp_func:
798#ifdef CONFIG_64BIT
799 reg = insn.mm_m_format.rd;
800 if (reg == 31)
801 goto sigbus;
802
803 if (!access_ok(VERIFY_READ, addr, 16))
804 goto sigbus;
805
806 LoadDW(addr, value, res);
807 if (res)
808 goto fault;
809 regs->regs[reg] = value;
810 addr += 8;
811 LoadDW(addr, value, res);
812 if (res)
813 goto fault;
814 regs->regs[reg + 1] = value;
815 goto success;
816#endif /* CONFIG_64BIT */
817
818 goto sigill;
819
820 case mm_sdp_func:
821#ifdef CONFIG_64BIT
822 reg = insn.mm_m_format.rd;
823 if (reg == 31)
824 goto sigbus;
825
826 if (!access_ok(VERIFY_WRITE, addr, 16))
827 goto sigbus;
828
829 value = regs->regs[reg];
830 StoreDW(addr, value, res);
831 if (res)
832 goto fault;
833 addr += 8;
834 value = regs->regs[reg + 1];
835 StoreDW(addr, value, res);
836 if (res)
837 goto fault;
838 goto success;
839#endif /* CONFIG_64BIT */
840
841 goto sigill;
842
843 case mm_lwm32_func:
844 reg = insn.mm_m_format.rd;
845 rvar = reg & 0xf;
846 if ((rvar > 9) || !reg)
847 goto sigill;
848 if (reg & 0x10) {
849 if (!access_ok
850 (VERIFY_READ, addr, 4 * (rvar + 1)))
851 goto sigbus;
852 } else {
853 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
854 goto sigbus;
855 }
856 if (rvar == 9)
857 rvar = 8;
858 for (i = 16; rvar; rvar--, i++) {
859 LoadW(addr, value, res);
860 if (res)
861 goto fault;
862 addr += 4;
863 regs->regs[i] = value;
864 }
865 if ((reg & 0xf) == 9) {
866 LoadW(addr, value, res);
867 if (res)
868 goto fault;
869 addr += 4;
870 regs->regs[30] = value;
871 }
872 if (reg & 0x10) {
873 LoadW(addr, value, res);
874 if (res)
875 goto fault;
876 regs->regs[31] = value;
877 }
878 goto success;
879
880 case mm_swm32_func:
881 reg = insn.mm_m_format.rd;
882 rvar = reg & 0xf;
883 if ((rvar > 9) || !reg)
884 goto sigill;
885 if (reg & 0x10) {
886 if (!access_ok
887 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
888 goto sigbus;
889 } else {
890 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
891 goto sigbus;
892 }
893 if (rvar == 9)
894 rvar = 8;
895 for (i = 16; rvar; rvar--, i++) {
896 value = regs->regs[i];
897 StoreW(addr, value, res);
898 if (res)
899 goto fault;
900 addr += 4;
901 }
902 if ((reg & 0xf) == 9) {
903 value = regs->regs[30];
904 StoreW(addr, value, res);
905 if (res)
906 goto fault;
907 addr += 4;
908 }
909 if (reg & 0x10) {
910 value = regs->regs[31];
911 StoreW(addr, value, res);
912 if (res)
913 goto fault;
914 }
915 goto success;
916
917 case mm_ldm_func:
918#ifdef CONFIG_64BIT
919 reg = insn.mm_m_format.rd;
920 rvar = reg & 0xf;
921 if ((rvar > 9) || !reg)
922 goto sigill;
923 if (reg & 0x10) {
924 if (!access_ok
925 (VERIFY_READ, addr, 8 * (rvar + 1)))
926 goto sigbus;
927 } else {
928 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
929 goto sigbus;
930 }
931 if (rvar == 9)
932 rvar = 8;
933
934 for (i = 16; rvar; rvar--, i++) {
935 LoadDW(addr, value, res);
936 if (res)
937 goto fault;
938 addr += 4;
939 regs->regs[i] = value;
940 }
941 if ((reg & 0xf) == 9) {
942 LoadDW(addr, value, res);
943 if (res)
944 goto fault;
945 addr += 8;
946 regs->regs[30] = value;
947 }
948 if (reg & 0x10) {
949 LoadDW(addr, value, res);
950 if (res)
951 goto fault;
952 regs->regs[31] = value;
953 }
954 goto success;
955#endif /* CONFIG_64BIT */
956
957 goto sigill;
958
959 case mm_sdm_func:
960#ifdef CONFIG_64BIT
961 reg = insn.mm_m_format.rd;
962 rvar = reg & 0xf;
963 if ((rvar > 9) || !reg)
964 goto sigill;
965 if (reg & 0x10) {
966 if (!access_ok
967 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
968 goto sigbus;
969 } else {
970 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
971 goto sigbus;
972 }
973 if (rvar == 9)
974 rvar = 8;
975
976 for (i = 16; rvar; rvar--, i++) {
977 value = regs->regs[i];
978 StoreDW(addr, value, res);
979 if (res)
980 goto fault;
981 addr += 8;
982 }
983 if ((reg & 0xf) == 9) {
984 value = regs->regs[30];
985 StoreDW(addr, value, res);
986 if (res)
987 goto fault;
988 addr += 8;
989 }
990 if (reg & 0x10) {
991 value = regs->regs[31];
992 StoreDW(addr, value, res);
993 if (res)
994 goto fault;
995 }
996 goto success;
997#endif /* CONFIG_64BIT */
998
999 goto sigill;
1000
1001 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1002 }
1003
1004 goto sigbus;
1005
1006 case mm_pool32c_op:
1007 switch (insn.mm_m_format.func) {
1008 case mm_lwu_func:
1009 reg = insn.mm_m_format.rd;
1010 goto loadWU;
1011 }
1012
1013 /* LL,SC,LLD,SCD are not serviced */
1014 goto sigbus;
1015
1016 case mm_pool32f_op:
1017 switch (insn.mm_x_format.func) {
1018 case mm_lwxc1_func:
1019 case mm_swxc1_func:
1020 case mm_ldxc1_func:
1021 case mm_sdxc1_func:
1022 goto fpu_emul;
1023 }
1024
1025 goto sigbus;
1026
1027 case mm_ldc132_op:
1028 case mm_sdc132_op:
1029 case mm_lwc132_op:
1030 case mm_swc132_op:
1031fpu_emul:
1032 /* roll back jump/branch */
1033 regs->cp0_epc = origpc;
1034 regs->regs[31] = orig31;
1035
1036 die_if_kernel("Unaligned FP access in kernel code", regs);
1037 BUG_ON(!used_math());
1038 BUG_ON(!is_fpu_owner());
1039
1040 lose_fpu(1); /* save the FPU state for the emulator */
1041 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1042 &fault_addr);
1043 own_fpu(1); /* restore FPU state */
1044
1045 /* If something went wrong, signal */
1046 process_fpemu_return(res, fault_addr);
1047
1048 if (res == 0)
1049 goto success;
1050 return;
1051
1052 case mm_lh32_op:
1053 reg = insn.mm_i_format.rt;
1054 goto loadHW;
1055
1056 case mm_lhu32_op:
1057 reg = insn.mm_i_format.rt;
1058 goto loadHWU;
1059
1060 case mm_lw32_op:
1061 reg = insn.mm_i_format.rt;
1062 goto loadW;
1063
1064 case mm_sh32_op:
1065 reg = insn.mm_i_format.rt;
1066 goto storeHW;
1067
1068 case mm_sw32_op:
1069 reg = insn.mm_i_format.rt;
1070 goto storeW;
1071
1072 case mm_ld32_op:
1073 reg = insn.mm_i_format.rt;
1074 goto loadDW;
1075
1076 case mm_sd32_op:
1077 reg = insn.mm_i_format.rt;
1078 goto storeDW;
1079
1080 case mm_pool16c_op:
1081 switch (insn.mm16_m_format.func) {
1082 case mm_lwm16_op:
1083 reg = insn.mm16_m_format.rlist;
1084 rvar = reg + 1;
1085 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1086 goto sigbus;
1087
1088 for (i = 16; rvar; rvar--, i++) {
1089 LoadW(addr, value, res);
1090 if (res)
1091 goto fault;
1092 addr += 4;
1093 regs->regs[i] = value;
1094 }
1095 LoadW(addr, value, res);
1096 if (res)
1097 goto fault;
1098 regs->regs[31] = value;
1099
1100 goto success;
1101
1102 case mm_swm16_op:
1103 reg = insn.mm16_m_format.rlist;
1104 rvar = reg + 1;
1105 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1106 goto sigbus;
1107
1108 for (i = 16; rvar; rvar--, i++) {
1109 value = regs->regs[i];
1110 StoreW(addr, value, res);
1111 if (res)
1112 goto fault;
1113 addr += 4;
1114 }
1115 value = regs->regs[31];
1116 StoreW(addr, value, res);
1117 if (res)
1118 goto fault;
1119
1120 goto success;
1121
1122 }
1123
1124 goto sigbus;
1125
1126 case mm_lhu16_op:
1127 reg = reg16to32[insn.mm16_rb_format.rt];
1128 goto loadHWU;
1129
1130 case mm_lw16_op:
1131 reg = reg16to32[insn.mm16_rb_format.rt];
1132 goto loadW;
1133
1134 case mm_sh16_op:
1135 reg = reg16to32st[insn.mm16_rb_format.rt];
1136 goto storeHW;
1137
1138 case mm_sw16_op:
1139 reg = reg16to32st[insn.mm16_rb_format.rt];
1140 goto storeW;
1141
1142 case mm_lwsp16_op:
1143 reg = insn.mm16_r5_format.rt;
1144 goto loadW;
1145
1146 case mm_swsp16_op:
1147 reg = insn.mm16_r5_format.rt;
1148 goto storeW;
1149
1150 case mm_lwgp16_op:
1151 reg = reg16to32[insn.mm16_r3_format.rt];
1152 goto loadW;
1153
1154 default:
1155 goto sigill;
1156 }
1157
1158loadHW:
1159 if (!access_ok(VERIFY_READ, addr, 2))
1160 goto sigbus;
1161
1162 LoadHW(addr, value, res);
1163 if (res)
1164 goto fault;
1165 regs->regs[reg] = value;
1166 goto success;
1167
1168loadHWU:
1169 if (!access_ok(VERIFY_READ, addr, 2))
1170 goto sigbus;
1171
1172 LoadHWU(addr, value, res);
1173 if (res)
1174 goto fault;
1175 regs->regs[reg] = value;
1176 goto success;
1177
1178loadW:
1179 if (!access_ok(VERIFY_READ, addr, 4))
1180 goto sigbus;
1181
1182 LoadW(addr, value, res);
1183 if (res)
1184 goto fault;
1185 regs->regs[reg] = value;
1186 goto success;
1187
1188loadWU:
1189#ifdef CONFIG_64BIT
1190 /*
1191 * A 32-bit kernel might be running on a 64-bit processor. But
1192 * if we're on a 32-bit processor and an i-cache incoherency
1193 * or race makes us see a 64-bit instruction here the sdl/sdr
1194 * would blow up, so for now we don't handle unaligned 64-bit
1195 * instructions on 32-bit kernels.
1196 */
1197 if (!access_ok(VERIFY_READ, addr, 4))
1198 goto sigbus;
1199
1200 LoadWU(addr, value, res);
1201 if (res)
1202 goto fault;
1203 regs->regs[reg] = value;
1204 goto success;
1205#endif /* CONFIG_64BIT */
1206
1207 /* Cannot handle 64-bit instructions in 32-bit kernel */
1208 goto sigill;
1209
1210loadDW:
1211#ifdef CONFIG_64BIT
1212 /*
1213 * A 32-bit kernel might be running on a 64-bit processor. But
1214 * if we're on a 32-bit processor and an i-cache incoherency
1215 * or race makes us see a 64-bit instruction here the sdl/sdr
1216 * would blow up, so for now we don't handle unaligned 64-bit
1217 * instructions on 32-bit kernels.
1218 */
1219 if (!access_ok(VERIFY_READ, addr, 8))
1220 goto sigbus;
1221
1222 LoadDW(addr, value, res);
1223 if (res)
1224 goto fault;
1225 regs->regs[reg] = value;
1226 goto success;
1227#endif /* CONFIG_64BIT */
1228
1229 /* Cannot handle 64-bit instructions in 32-bit kernel */
1230 goto sigill;
1231
1232storeHW:
1233 if (!access_ok(VERIFY_WRITE, addr, 2))
1234 goto sigbus;
1235
1236 value = regs->regs[reg];
1237 StoreHW(addr, value, res);
1238 if (res)
1239 goto fault;
1240 goto success;
1241
1242storeW:
1243 if (!access_ok(VERIFY_WRITE, addr, 4))
1244 goto sigbus;
1245
1246 value = regs->regs[reg];
1247 StoreW(addr, value, res);
1248 if (res)
1249 goto fault;
1250 goto success;
1251
1252storeDW:
1253#ifdef CONFIG_64BIT
1254 /*
1255 * A 32-bit kernel might be running on a 64-bit processor. But
1256 * if we're on a 32-bit processor and an i-cache incoherency
1257 * or race makes us see a 64-bit instruction here the sdl/sdr
1258 * would blow up, so for now we don't handle unaligned 64-bit
1259 * instructions on 32-bit kernels.
1260 */
1261 if (!access_ok(VERIFY_WRITE, addr, 8))
1262 goto sigbus;
1263
1264 value = regs->regs[reg];
1265 StoreDW(addr, value, res);
1266 if (res)
1267 goto fault;
1268 goto success;
1269#endif /* CONFIG_64BIT */
1270
1271 /* Cannot handle 64-bit instructions in 32-bit kernel */
1272 goto sigill;
1273
1274success:
1275 regs->cp0_epc = contpc; /* advance or branch */
1276
1277#ifdef CONFIG_DEBUG_FS
1278 unaligned_instructions++;
1279#endif
1280 return;
1281
1282fault:
1283 /* roll back jump/branch */
1284 regs->cp0_epc = origpc;
1285 regs->regs[31] = orig31;
1286 /* Did we have an exception handler installed? */
1287 if (fixup_exception(regs))
1288 return;
1289
1290 die_if_kernel("Unhandled kernel unaligned access", regs);
1291 force_sig(SIGSEGV, current);
1292
1293 return;
1294
1295sigbus:
1296 die_if_kernel("Unhandled kernel unaligned access", regs);
1297 force_sig(SIGBUS, current);
1298
1299 return;
1300
1301sigill:
1302 die_if_kernel
1303 ("Unhandled kernel unaligned access or invalid instruction", regs);
1304 force_sig(SIGILL, current);
1305}
1306
/*
 * Emulate an unaligned MIPS16e load/store.
 *
 * @regs: trap frame; cp0_epc points at the faulting instruction with
 *        the ISA bit set.
 * @addr: the unaligned data address (from BadVAddr, per the caller).
 *
 * The instruction is fetched from EPC; an EXTEND prefix is skipped, and
 * if the fault is in a branch delay slot the jump is skipped to reach
 * the actual memory instruction.  The access is performed with the
 * Load*/Store* fixup macros; on success MIPS16e_compute_return_epc()
 * advances EPC, and on a fault during the access EPC and $31 are rolled
 * back before the signal is delivered.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;

	/* Saved so a mid-emulation fault can roll back branch emulation. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;	/* keep the original for EPC computation */

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/* First pass: decode which GPR the instruction targets. */
	switch (mips16inst.ri.opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/* Second pass: perform the access itself. */
	switch (mips16inst.ri.opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* Byte accesses can only fault on bad addresses. */
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		/* EPC is advanced before the store; a store fault rolls
		 * it back at the fault: label below. */
		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
511asmlinkage void do_ade(struct pt_regs *regs) 1551asmlinkage void do_ade(struct pt_regs *regs)
512{ 1552{
513 unsigned int __user *pc; 1553 unsigned int __user *pc;
@@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs)
517 1, regs, regs->cp0_badvaddr); 1557 1, regs, regs->cp0_badvaddr);
518 /* 1558 /*
519 * Did we catch a fault trying to load an instruction? 1559 * Did we catch a fault trying to load an instruction?
520 * Or are we running in MIPS16 mode?
521 */ 1560 */
522 if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) 1561 if (regs->cp0_badvaddr == regs->cp0_epc)
523 goto sigbus; 1562 goto sigbus;
524 1563
525 pc = (unsigned int __user *) exception_epc(regs);
526 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) 1564 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
527 goto sigbus; 1565 goto sigbus;
528 if (unaligned_action == UNALIGNED_ACTION_SIGNAL) 1566 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
529 goto sigbus; 1567 goto sigbus;
530 else if (unaligned_action == UNALIGNED_ACTION_SHOW)
531 show_registers(regs);
532 1568
533 /* 1569 /*
534 * Do branch emulation only if we didn't forward the exception. 1570 * Do branch emulation only if we didn't forward the exception.
535 * This is all so but ugly ... 1571 * This is all so but ugly ...
536 */ 1572 */
1573
1574 /*
1575 * Are we running in microMIPS mode?
1576 */
1577 if (get_isa16_mode(regs->cp0_epc)) {
1578 /*
1579 * Did we catch a fault trying to load an instruction in
1580 * 16-bit mode?
1581 */
1582 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1583 goto sigbus;
1584 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1585 show_registers(regs);
1586
1587 if (cpu_has_mmips) {
1588 seg = get_fs();
1589 if (!user_mode(regs))
1590 set_fs(KERNEL_DS);
1591 emulate_load_store_microMIPS(regs,
1592 (void __user *)regs->cp0_badvaddr);
1593 set_fs(seg);
1594
1595 return;
1596 }
1597
1598 if (cpu_has_mips16) {
1599 seg = get_fs();
1600 if (!user_mode(regs))
1601 set_fs(KERNEL_DS);
1602 emulate_load_store_MIPS16e(regs,
1603 (void __user *)regs->cp0_badvaddr);
1604 set_fs(seg);
1605
1606 return;
1607 }
1608
1609 goto sigbus;
1610 }
1611
1612 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1613 show_registers(regs);
1614 pc = (unsigned int __user *)exception_epc(regs);
1615
537 seg = get_fs(); 1616 seg = get_fs();
538 if (!user_mode(regs)) 1617 if (!user_mode(regs))
539 set_fs(KERNEL_DS); 1618 set_fs(KERNEL_DS);
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 000000000000..51617e481aa3
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
1KVM/MIPS Trap & Emulate Release Notes
2=====================================
3
4(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
5 Malta Board with FPGA based 34K
6 Sigma Designs TangoX board with a 24K based 8654 SoC.
7 Malta Board with 74K @ 1GHz
8
9(2) Both Guest kernel and Guest Userspace execute in UM.
10 Guest User address space: 0x00000000 -> 0x40000000
11 Guest Kernel Unmapped: 0x40000000 -> 0x60000000
12 Guest Kernel Mapped: 0x60000000 -> 0x80000000
13
14 Guest Usermode virtual memory is limited to 1GB.
15
16(2) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
17 Note that due to cache aliasing issues, 4K page sizes are NOT supported.
18
19(3) No HugeTLB Support
20 Both the host kernel and Guest kernel should have the page size set to 16K.
21 This will be implemented in a future release.
22
23(4) KVM/MIPS does not have support for SMP Guests
24 Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
25 LL/TLBP/SC. Since the TLBP instruction causes a trap the reservation gets cleared
26 when we ERET back to the guest. This causes the guest to hang in an infinite loop.
27 This will be fixed in a future release.
28
29(5) Use Host FPU
30 Currently KVM/MIPS emulates a 24K CPU without a FPU.
31 This will be fixed in a future release
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 000000000000..2c15590e55f7
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
1#
2# KVM configuration
3#
4source "virt/kvm/Kconfig"
5
6menuconfig VIRTUALIZATION
7 bool "Virtualization"
8 depends on HAVE_KVM
9 ---help---
10 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests).
12 This option alone does not add any kernel code.
13
14 If you say N, all options in this submenu will be skipped and disabled.
15
16if VIRTUALIZATION
17
18config KVM
19 tristate "Kernel-based Virtual Machine (KVM) support"
20 depends on HAVE_KVM
21 select PREEMPT_NOTIFIERS
22 select ANON_INODES
23 select KVM_MMIO
24 ---help---
25 Support for hosting Guest kernels.
26 Currently supported on MIPS32 processors.
27
28config KVM_MIPS_DYN_TRANS
29 bool "KVM/MIPS: Dynamic binary translation to reduce traps"
30 depends on KVM
31 ---help---
32 When running in Trap & Emulate mode patch privileged
33 instructions to reduce the number of traps.
34
35 If unsure, say Y.
36
37config KVM_MIPS_DEBUG_COP0_COUNTERS
38 bool "Maintain counters for COP0 accesses"
39 depends on KVM
40 ---help---
41 Maintain statistics for Guest COP0 accesses.
42 A histogram of COP0 accesses is printed when the VM is
43 shutdown.
44
45 If unsure, say N.
46
47source drivers/vhost/Kconfig
48
49endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 000000000000..78d87bbc99db
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,13 @@
1# Makefile for KVM support for MIPS
2#
3
4common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
5
6EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
7
8kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
9 kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
10 kvm_mips_dyntrans.o kvm_trap_emul.o
11
12obj-$(CONFIG_KVM) += kvm.o
13obj-y += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644
index 000000000000..313c2e37b978
--- /dev/null
+++ b/arch/mips/kvm/kvm_cb.c
@@ -0,0 +1,14 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 * Authors: Yann Le Du <ledu@kymasys.com>
8 */
9
10#include <linux/export.h>
11#include <linux/kvm_host.h>
12
13struct kvm_mips_callbacks *kvm_mips_callbacks;
14EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644
index 000000000000..dca2aa665993
--- /dev/null
+++ b/arch/mips/kvm/kvm_locore.S
@@ -0,0 +1,650 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Main entry point for the guest, exception handling.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h>
18
19
20#define _C_LABEL(x) x
21#define MIPSX(name) mips32_ ## name
22#define CALLFRAME_SIZ 32
23
24/*
25 * VECTOR
26 * exception vector entrypoint
27 */
28#define VECTOR(x, regmask) \
29 .ent _C_LABEL(x),0; \
30 EXPORT(x);
31
32#define VECTOR_END(x) \
33 EXPORT(x);
34
35/* Overload, Danger Will Robinson!! */
36#define PT_HOST_ASID PT_BVADDR
37#define PT_HOST_USERLOCAL PT_EPC
38
39#define CP0_DDATA_LO $28,3
40#define CP0_EBASE $15,1
41
42#define CP0_INTCTL $12,1
43#define CP0_SRSCTL $12,2
44#define CP0_SRSMAP $12,3
45#define CP0_HWRENA $7,0
46
47/* Resume Flags */
48#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
49
50#define RESUME_GUEST 0
51#define RESUME_HOST RESUME_FLAG_HOST
52
53/*
54 * __kvm_mips_vcpu_run: entry point to the guest
55 * a0: run
56 * a1: vcpu
57 */
58
59FEXPORT(__kvm_mips_vcpu_run)
60 .set push
61 .set noreorder
62 .set noat
63
64 /* k0/k1 not being used in host kernel context */
65 addiu k1,sp, -PT_SIZE
66 LONG_S $0, PT_R0(k1)
67 LONG_S $1, PT_R1(k1)
68 LONG_S $2, PT_R2(k1)
69 LONG_S $3, PT_R3(k1)
70
71 LONG_S $4, PT_R4(k1)
72 LONG_S $5, PT_R5(k1)
73 LONG_S $6, PT_R6(k1)
74 LONG_S $7, PT_R7(k1)
75
76 LONG_S $8, PT_R8(k1)
77 LONG_S $9, PT_R9(k1)
78 LONG_S $10, PT_R10(k1)
79 LONG_S $11, PT_R11(k1)
80 LONG_S $12, PT_R12(k1)
81 LONG_S $13, PT_R13(k1)
82 LONG_S $14, PT_R14(k1)
83 LONG_S $15, PT_R15(k1)
84 LONG_S $16, PT_R16(k1)
85 LONG_S $17, PT_R17(k1)
86
87 LONG_S $18, PT_R18(k1)
88 LONG_S $19, PT_R19(k1)
89 LONG_S $20, PT_R20(k1)
90 LONG_S $21, PT_R21(k1)
91 LONG_S $22, PT_R22(k1)
92 LONG_S $23, PT_R23(k1)
93 LONG_S $24, PT_R24(k1)
94 LONG_S $25, PT_R25(k1)
95
96 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
97
98 LONG_S $28, PT_R28(k1)
99 LONG_S $29, PT_R29(k1)
100 LONG_S $30, PT_R30(k1)
101 LONG_S $31, PT_R31(k1)
102
103 /* Save hi/lo */
104 mflo v0
105 LONG_S v0, PT_LO(k1)
106 mfhi v1
107 LONG_S v1, PT_HI(k1)
108
109 /* Save host status */
110 mfc0 v0, CP0_STATUS
111 LONG_S v0, PT_STATUS(k1)
112
113 /* Save host ASID, shove it into the BVADDR location */
114 mfc0 v1,CP0_ENTRYHI
115 andi v1, 0xff
116 LONG_S v1, PT_HOST_ASID(k1)
117
118 /* Save DDATA_LO, will be used to store pointer to vcpu */
119 mfc0 v1, CP0_DDATA_LO
120 LONG_S v1, PT_HOST_USERLOCAL(k1)
121
122 /* DDATA_LO has pointer to vcpu */
123 mtc0 a1,CP0_DDATA_LO
124
125 /* Offset into vcpu->arch */
126 addiu k1, a1, VCPU_HOST_ARCH
127
128 /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
129 LONG_S sp, VCPU_HOST_STACK(k1)
130
131 /* Save the kernel gp as well */
132 LONG_S gp, VCPU_HOST_GP(k1)
133
134 /* Setup status register for running the guest in UM, interrupts are disabled */
135 li k0,(ST0_EXL | KSU_USER| ST0_BEV)
136 mtc0 k0,CP0_STATUS
137 ehb
138
139 /* load up the new EBASE */
140 LONG_L k0, VCPU_GUEST_EBASE(k1)
141 mtc0 k0,CP0_EBASE
142
143 /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
144 * but make sure that timer interrupts are enabled
145 */
146 li k0,(ST0_EXL | KSU_USER | ST0_IE)
147 andi v0, v0, ST0_IM
148 or k0, k0, v0
149 mtc0 k0,CP0_STATUS
150 ehb
151
152
153 /* Set Guest EPC */
154 LONG_L t0, VCPU_PC(k1)
155 mtc0 t0, CP0_EPC
156
157FEXPORT(__kvm_mips_load_asid)
158 /* Set the ASID for the Guest Kernel */
159 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
160 /* addresses shift to 0x80000000 */
161 bltz t0, 1f /* If kernel */
162 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
163 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
1641:
165 /* t1: contains the base of the ASID array, need to get the cpu id */
166 LONG_L t2, TI_CPU($28) /* smp_processor_id */
167 sll t2, t2, 2 /* x4 */
168 addu t3, t1, t2
169 LONG_L k0, (t3)
170 andi k0, k0, 0xff
171 mtc0 k0,CP0_ENTRYHI
172 ehb
173
174 /* Disable RDHWR access */
175 mtc0 zero, CP0_HWRENA
176
177 /* Now load up the Guest Context from VCPU */
178 LONG_L $1, VCPU_R1(k1)
179 LONG_L $2, VCPU_R2(k1)
180 LONG_L $3, VCPU_R3(k1)
181
182 LONG_L $4, VCPU_R4(k1)
183 LONG_L $5, VCPU_R5(k1)
184 LONG_L $6, VCPU_R6(k1)
185 LONG_L $7, VCPU_R7(k1)
186
187 LONG_L $8, VCPU_R8(k1)
188 LONG_L $9, VCPU_R9(k1)
189 LONG_L $10, VCPU_R10(k1)
190 LONG_L $11, VCPU_R11(k1)
191 LONG_L $12, VCPU_R12(k1)
192 LONG_L $13, VCPU_R13(k1)
193 LONG_L $14, VCPU_R14(k1)
194 LONG_L $15, VCPU_R15(k1)
195 LONG_L $16, VCPU_R16(k1)
196 LONG_L $17, VCPU_R17(k1)
197 LONG_L $18, VCPU_R18(k1)
198 LONG_L $19, VCPU_R19(k1)
199 LONG_L $20, VCPU_R20(k1)
200 LONG_L $21, VCPU_R21(k1)
201 LONG_L $22, VCPU_R22(k1)
202 LONG_L $23, VCPU_R23(k1)
203 LONG_L $24, VCPU_R24(k1)
204 LONG_L $25, VCPU_R25(k1)
205
206 /* k0/k1 loaded up later */
207
208 LONG_L $28, VCPU_R28(k1)
209 LONG_L $29, VCPU_R29(k1)
210 LONG_L $30, VCPU_R30(k1)
211 LONG_L $31, VCPU_R31(k1)
212
213 /* Restore hi/lo */
214 LONG_L k0, VCPU_LO(k1)
215 mtlo k0
216
217 LONG_L k0, VCPU_HI(k1)
218 mthi k0
219
220FEXPORT(__kvm_mips_load_k0k1)
221 /* Restore the guest's k0/k1 registers */
222 LONG_L k0, VCPU_R26(k1)
223 LONG_L k1, VCPU_R27(k1)
224
225 /* Jump to guest */
226 eret
227 .set pop
228
229VECTOR(MIPSX(exception), unknown)
230/*
231 * Find out what mode we came from and jump to the proper handler.
232 */
233 .set push
234 .set noat
235 .set noreorder
236 mtc0 k0, CP0_ERROREPC #01: Save guest k0
237 ehb #02:
238
239 mfc0 k0, CP0_EBASE #02: Get EBASE
240 srl k0, k0, 10 #03: Get rid of CPUNum
241 sll k0, k0, 10 #04
242 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
243 addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
244 j k0 #07: jump to the function
245 nop #08: branch delay slot
246 .set push
247VECTOR_END(MIPSX(exceptionEnd))
248.end MIPSX(exception)
249
250/*
251 * Generic Guest exception handler. We end up here when the guest
252 * does something that causes a trap to kernel mode.
253 *
254 */
255NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
256 .set push
257 .set noat
258 .set noreorder
259
260 /* Get the VCPU pointer from DDTATA_LO */
261 mfc0 k1, CP0_DDATA_LO
262 addiu k1, k1, VCPU_HOST_ARCH
263
264 /* Start saving Guest context to VCPU */
265 LONG_S $0, VCPU_R0(k1)
266 LONG_S $1, VCPU_R1(k1)
267 LONG_S $2, VCPU_R2(k1)
268 LONG_S $3, VCPU_R3(k1)
269 LONG_S $4, VCPU_R4(k1)
270 LONG_S $5, VCPU_R5(k1)
271 LONG_S $6, VCPU_R6(k1)
272 LONG_S $7, VCPU_R7(k1)
273 LONG_S $8, VCPU_R8(k1)
274 LONG_S $9, VCPU_R9(k1)
275 LONG_S $10, VCPU_R10(k1)
276 LONG_S $11, VCPU_R11(k1)
277 LONG_S $12, VCPU_R12(k1)
278 LONG_S $13, VCPU_R13(k1)
279 LONG_S $14, VCPU_R14(k1)
280 LONG_S $15, VCPU_R15(k1)
281 LONG_S $16, VCPU_R16(k1)
282 LONG_S $17,VCPU_R17(k1)
283 LONG_S $18, VCPU_R18(k1)
284 LONG_S $19, VCPU_R19(k1)
285 LONG_S $20, VCPU_R20(k1)
286 LONG_S $21, VCPU_R21(k1)
287 LONG_S $22, VCPU_R22(k1)
288 LONG_S $23, VCPU_R23(k1)
289 LONG_S $24, VCPU_R24(k1)
290 LONG_S $25, VCPU_R25(k1)
291
292 /* Guest k0/k1 saved later */
293
294 LONG_S $28, VCPU_R28(k1)
295 LONG_S $29, VCPU_R29(k1)
296 LONG_S $30, VCPU_R30(k1)
297 LONG_S $31, VCPU_R31(k1)
298
299 /* We need to save hi/lo and restore them on
300 * the way out
301 */
302 mfhi t0
303 LONG_S t0, VCPU_HI(k1)
304
305 mflo t0
306 LONG_S t0, VCPU_LO(k1)
307
308 /* Finally save guest k0/k1 to VCPU */
309 mfc0 t0, CP0_ERROREPC
310 LONG_S t0, VCPU_R26(k1)
311
312 /* Get GUEST k1 and save it in VCPU */
313 la t1, ~0x2ff
314 mfc0 t0, CP0_EBASE
315 and t0, t0, t1
316 LONG_L t0, 0x3000(t0)
317 LONG_S t0, VCPU_R27(k1)
318
319 /* Now that context has been saved, we can use other registers */
320
321 /* Restore vcpu */
322 mfc0 a1, CP0_DDATA_LO
323 move s1, a1
324
325 /* Restore run (vcpu->run) */
326 LONG_L a0, VCPU_RUN(a1)
327 /* Save pointer to run in s0, will be saved by the compiler */
328 move s0, a0
329
330
331 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
332 mfc0 k0,CP0_EPC
333 LONG_S k0, VCPU_PC(k1)
334
335 mfc0 k0, CP0_BADVADDR
336 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
337
338 mfc0 k0, CP0_CAUSE
339 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
340
341 mfc0 k0, CP0_ENTRYHI
342 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
343
344 /* Now restore the host state just enough to run the handlers */
345
346 /* Swtich EBASE to the one used by Linux */
347 /* load up the host EBASE */
348 mfc0 v0, CP0_STATUS
349
350 .set at
351 or k0, v0, ST0_BEV
352 .set noat
353
354 mtc0 k0, CP0_STATUS
355 ehb
356
357 LONG_L k0, VCPU_HOST_EBASE(k1)
358 mtc0 k0,CP0_EBASE
359
360
361 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
362 .set at
363 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
364 or v0, v0, ST0_CU0
365 .set noat
366 mtc0 v0, CP0_STATUS
367 ehb
368
369 /* Load up host GP */
370 LONG_L gp, VCPU_HOST_GP(k1)
371
372 /* Need a stack before we can jump to "C" */
373 LONG_L sp, VCPU_HOST_STACK(k1)
374
375 /* Saved host state */
376 addiu sp,sp, -PT_SIZE
377
378 /* XXXKYMA do we need to load the host ASID, maybe not because the
379 * kernel entries are marked GLOBAL, need to verify
380 */
381
382 /* Restore host DDATA_LO */
383 LONG_L k0, PT_HOST_USERLOCAL(sp)
384 mtc0 k0, CP0_DDATA_LO
385
386 /* Restore RDHWR access */
387 la k0, 0x2000000F
388 mtc0 k0, CP0_HWRENA
389
390 /* Jump to handler */
391FEXPORT(__kvm_mips_jump_to_handler)
392 /* XXXKYMA: not sure if this is safe, how large is the stack?? */
393 /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
394 la t9,kvm_mips_handle_exit
395 jalr.hb t9
396 addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */
397
398 /* Return from handler Make sure interrupts are disabled */
399 di
400 ehb
401
402 /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
403 * while we were handling the exception from the guest, reload k1
404 */
405 move k1, s1
406 addiu k1, k1, VCPU_HOST_ARCH
407
408 /* Check return value, should tell us if we are returning to the host (handle I/O etc)
409 * or resuming the guest
410 */
411 andi t0, v0, RESUME_HOST
412 bnez t0, __kvm_mips_return_to_host
413 nop
414
415__kvm_mips_return_to_guest:
416 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
417 mtc0 s1, CP0_DDATA_LO
418
419 /* Load up the Guest EBASE to minimize the window where BEV is set */
420 LONG_L t0, VCPU_GUEST_EBASE(k1)
421
422 /* Switch EBASE back to the one used by KVM */
423 mfc0 v1, CP0_STATUS
424 .set at
425 or k0, v1, ST0_BEV
426 .set noat
427 mtc0 k0, CP0_STATUS
428 ehb
429 mtc0 t0,CP0_EBASE
430
431 /* Setup status register for running guest in UM */
432 .set at
433 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
434 and v1, v1, ~ST0_CU0
435 .set noat
436 mtc0 v1, CP0_STATUS
437 ehb
438
439
440 /* Set Guest EPC */
441 LONG_L t0, VCPU_PC(k1)
442 mtc0 t0, CP0_EPC
443
444 /* Set the ASID for the Guest Kernel */
445 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
446 /* addresses shift to 0x80000000 */
447 bltz t0, 1f /* If kernel */
448 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
449 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
4501:
451 /* t1: contains the base of the ASID array, need to get the cpu id */
452 LONG_L t2, TI_CPU($28) /* smp_processor_id */
453 sll t2, t2, 2 /* x4 */
454 addu t3, t1, t2
455 LONG_L k0, (t3)
456 andi k0, k0, 0xff
457 mtc0 k0,CP0_ENTRYHI
458 ehb
459
460 /* Disable RDHWR access */
461 mtc0 zero, CP0_HWRENA
462
463 /* load the guest context from VCPU and return */
464 LONG_L $0, VCPU_R0(k1)
465 LONG_L $1, VCPU_R1(k1)
466 LONG_L $2, VCPU_R2(k1)
467 LONG_L $3, VCPU_R3(k1)
468 LONG_L $4, VCPU_R4(k1)
469 LONG_L $5, VCPU_R5(k1)
470 LONG_L $6, VCPU_R6(k1)
471 LONG_L $7, VCPU_R7(k1)
472 LONG_L $8, VCPU_R8(k1)
473 LONG_L $9, VCPU_R9(k1)
474 LONG_L $10, VCPU_R10(k1)
475 LONG_L $11, VCPU_R11(k1)
476 LONG_L $12, VCPU_R12(k1)
477 LONG_L $13, VCPU_R13(k1)
478 LONG_L $14, VCPU_R14(k1)
479 LONG_L $15, VCPU_R15(k1)
480 LONG_L $16, VCPU_R16(k1)
481 LONG_L $17, VCPU_R17(k1)
482 LONG_L $18, VCPU_R18(k1)
483 LONG_L $19, VCPU_R19(k1)
484 LONG_L $20, VCPU_R20(k1)
485 LONG_L $21, VCPU_R21(k1)
486 LONG_L $22, VCPU_R22(k1)
487 LONG_L $23, VCPU_R23(k1)
488 LONG_L $24, VCPU_R24(k1)
489 LONG_L $25, VCPU_R25(k1)
490
491 /* $/k1 loaded later */
492 LONG_L $28, VCPU_R28(k1)
493 LONG_L $29, VCPU_R29(k1)
494 LONG_L $30, VCPU_R30(k1)
495 LONG_L $31, VCPU_R31(k1)
496
497FEXPORT(__kvm_mips_skip_guest_restore)
498 LONG_L k0, VCPU_HI(k1)
499 mthi k0
500
501 LONG_L k0, VCPU_LO(k1)
502 mtlo k0
503
504 LONG_L k0, VCPU_R26(k1)
505 LONG_L k1, VCPU_R27(k1)
506
507 eret
508
509__kvm_mips_return_to_host:
510 /* EBASE is already pointing to Linux */
511 LONG_L k1, VCPU_HOST_STACK(k1)
512 addiu k1,k1, -PT_SIZE
513
514 /* Restore host DDATA_LO */
515 LONG_L k0, PT_HOST_USERLOCAL(k1)
516 mtc0 k0, CP0_DDATA_LO
517
518 /* Restore host ASID */
519 LONG_L k0, PT_HOST_ASID(sp)
520 andi k0, 0xff
521 mtc0 k0,CP0_ENTRYHI
522 ehb
523
524 /* Load context saved on the host stack */
525 LONG_L $0, PT_R0(k1)
526 LONG_L $1, PT_R1(k1)
527
528 /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
529 sra k0, v0, 2
530 move $2, k0
531
532 LONG_L $3, PT_R3(k1)
533 LONG_L $4, PT_R4(k1)
534 LONG_L $5, PT_R5(k1)
535 LONG_L $6, PT_R6(k1)
536 LONG_L $7, PT_R7(k1)
537 LONG_L $8, PT_R8(k1)
538 LONG_L $9, PT_R9(k1)
539 LONG_L $10, PT_R10(k1)
540 LONG_L $11, PT_R11(k1)
541 LONG_L $12, PT_R12(k1)
542 LONG_L $13, PT_R13(k1)
543 LONG_L $14, PT_R14(k1)
544 LONG_L $15, PT_R15(k1)
545 LONG_L $16, PT_R16(k1)
546 LONG_L $17, PT_R17(k1)
547 LONG_L $18, PT_R18(k1)
548 LONG_L $19, PT_R19(k1)
549 LONG_L $20, PT_R20(k1)
550 LONG_L $21, PT_R21(k1)
551 LONG_L $22, PT_R22(k1)
552 LONG_L $23, PT_R23(k1)
553 LONG_L $24, PT_R24(k1)
554 LONG_L $25, PT_R25(k1)
555
556 /* Host k0/k1 were not saved */
557
558 LONG_L $28, PT_R28(k1)
559 LONG_L $29, PT_R29(k1)
560 LONG_L $30, PT_R30(k1)
561
562 LONG_L k0, PT_HI(k1)
563 mthi k0
564
565 LONG_L k0, PT_LO(k1)
566 mtlo k0
567
568 /* Restore RDHWR access */
569 la k0, 0x2000000F
570 mtc0 k0, CP0_HWRENA
571
572
573 /* Restore RA, which is the address we will return to */
574 LONG_L ra, PT_R31(k1)
575 j ra
576 nop
577
578 .set pop
579VECTOR_END(MIPSX(GuestExceptionEnd))
580.end MIPSX(GuestException)
581
582MIPSX(exceptions):
583 ####
584 ##### The exception handlers.
585 #####
586 .word _C_LABEL(MIPSX(GuestException)) # 0
587 .word _C_LABEL(MIPSX(GuestException)) # 1
588 .word _C_LABEL(MIPSX(GuestException)) # 2
589 .word _C_LABEL(MIPSX(GuestException)) # 3
590 .word _C_LABEL(MIPSX(GuestException)) # 4
591 .word _C_LABEL(MIPSX(GuestException)) # 5
592 .word _C_LABEL(MIPSX(GuestException)) # 6
593 .word _C_LABEL(MIPSX(GuestException)) # 7
594 .word _C_LABEL(MIPSX(GuestException)) # 8
595 .word _C_LABEL(MIPSX(GuestException)) # 9
596 .word _C_LABEL(MIPSX(GuestException)) # 10
597 .word _C_LABEL(MIPSX(GuestException)) # 11
598 .word _C_LABEL(MIPSX(GuestException)) # 12
599 .word _C_LABEL(MIPSX(GuestException)) # 13
600 .word _C_LABEL(MIPSX(GuestException)) # 14
601 .word _C_LABEL(MIPSX(GuestException)) # 15
602 .word _C_LABEL(MIPSX(GuestException)) # 16
603 .word _C_LABEL(MIPSX(GuestException)) # 17
604 .word _C_LABEL(MIPSX(GuestException)) # 18
605 .word _C_LABEL(MIPSX(GuestException)) # 19
606 .word _C_LABEL(MIPSX(GuestException)) # 20
607 .word _C_LABEL(MIPSX(GuestException)) # 21
608 .word _C_LABEL(MIPSX(GuestException)) # 22
609 .word _C_LABEL(MIPSX(GuestException)) # 23
610 .word _C_LABEL(MIPSX(GuestException)) # 24
611 .word _C_LABEL(MIPSX(GuestException)) # 25
612 .word _C_LABEL(MIPSX(GuestException)) # 26
613 .word _C_LABEL(MIPSX(GuestException)) # 27
614 .word _C_LABEL(MIPSX(GuestException)) # 28
615 .word _C_LABEL(MIPSX(GuestException)) # 29
616 .word _C_LABEL(MIPSX(GuestException)) # 30
617 .word _C_LABEL(MIPSX(GuestException)) # 31
618
619
620/* This routine makes changes to the instruction stream effective to the hardware.
621 * It should be called after the instruction stream is written.
622 * On return, the new instructions are effective.
623 * Inputs:
624 * a0 = Start address of new instruction stream
625 * a1 = Size, in bytes, of new instruction stream
626 */
627
628#define HW_SYNCI_Step $1
629LEAF(MIPSX(SyncICache))
630 .set push
631 .set mips32r2
632 beq a1, zero, 20f
633 nop
634 addu a1, a0, a1
635 rdhwr v0, HW_SYNCI_Step
636 beq v0, zero, 20f
637 nop
638
63910:
640 synci 0(a0)
641 addu a0, a0, v0
642 sltu v1, a0, a1
643 bne v1, zero, 10b
644 nop
645 sync
64620:
647 jr.hb ra
648 nop
649 .set pop
650END(MIPSX(SyncICache))
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644
index 000000000000..e0dad0289797
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips.c
@@ -0,0 +1,958 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20#include <asm/mmu_context.h>
21
22#include <linux/kvm_host.h>
23
24#include "kvm_mips_int.h"
25#include "kvm_mips_comm.h"
26
27#define CREATE_TRACE_POINTS
28#include "trace.h"
29
30#ifndef VECTORSPACING
31#define VECTORSPACING 0x100 /* for EI/VI mode */
32#endif
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { "wait", VCPU_STAT(wait_exits) },
37 { "cache", VCPU_STAT(cache_exits) },
38 { "signal", VCPU_STAT(signal_exits) },
39 { "interrupt", VCPU_STAT(int_exits) },
40 { "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
41 { "tlbmod", VCPU_STAT(tlbmod_exits) },
42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
44 { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
46 { "syscall", VCPU_STAT(syscall_exits) },
47 { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
48 { "break_inst", VCPU_STAT(break_inst_exits) },
49 { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
50 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
51 {NULL}
52};
53
54static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55{
56 int i;
57 for_each_possible_cpu(i) {
58 vcpu->arch.guest_kernel_asid[i] = 0;
59 vcpu->arch.guest_user_asid[i] = 0;
60 }
61 return 0;
62}
63
64gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
65{
66 return gfn;
67}
68
69/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
70 * are "runnable" if interrupts are pending
71 */
72int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
73{
74 return !!(vcpu->arch.pending_exceptions);
75}
76
77int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
78{
79 return 1;
80}
81
82int kvm_arch_hardware_enable(void *garbage)
83{
84 return 0;
85}
86
87void kvm_arch_hardware_disable(void *garbage)
88{
89}
90
91int kvm_arch_hardware_setup(void)
92{
93 return 0;
94}
95
96void kvm_arch_hardware_unsetup(void)
97{
98}
99
100void kvm_arch_check_processor_compat(void *rtn)
101{
102 int *r = (int *)rtn;
103 *r = 0;
104 return;
105}
106
107static void kvm_mips_init_tlbs(struct kvm *kvm)
108{
109 unsigned long wired;
110
111 /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
112 wired = read_c0_wired();
113 write_c0_wired(wired + 1);
114 mtc0_tlbw_hazard();
115 kvm->arch.commpage_tlb = wired;
116
117 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
118 kvm->arch.commpage_tlb);
119}
120
121static void kvm_mips_init_vm_percpu(void *arg)
122{
123 struct kvm *kvm = (struct kvm *)arg;
124
125 kvm_mips_init_tlbs(kvm);
126 kvm_mips_callbacks->vm_init(kvm);
127
128}
129
130int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
131{
132 if (atomic_inc_return(&kvm_mips_instance) == 1) {
133 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
134 __func__);
135 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
136 }
137
138
139 return 0;
140}
141
142void kvm_mips_free_vcpus(struct kvm *kvm)
143{
144 unsigned int i;
145 struct kvm_vcpu *vcpu;
146
147 /* Put the pages we reserved for the guest pmap */
148 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
150 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
151 }
152
153 if (kvm->arch.guest_pmap)
154 kfree(kvm->arch.guest_pmap);
155
156 kvm_for_each_vcpu(i, vcpu, kvm) {
157 kvm_arch_vcpu_free(vcpu);
158 }
159
160 mutex_lock(&kvm->lock);
161
162 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
163 kvm->vcpus[i] = NULL;
164
165 atomic_set(&kvm->online_vcpus, 0);
166
167 mutex_unlock(&kvm->lock);
168}
169
170void kvm_arch_sync_events(struct kvm *kvm)
171{
172}
173
174static void kvm_mips_uninit_tlbs(void *arg)
175{
176 /* Restore wired count */
177 write_c0_wired(0);
178 mtc0_tlbw_hazard();
179 /* Clear out all the TLBs */
180 kvm_local_flush_tlb_all();
181}
182
183void kvm_arch_destroy_vm(struct kvm *kvm)
184{
185 kvm_mips_free_vcpus(kvm);
186
187 /* If this is the last instance, restore wired count */
188 if (atomic_dec_return(&kvm_mips_instance) == 0) {
189 kvm_info("%s: last KVM instance, restoring TLB parameters\n",
190 __func__);
191 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
192 }
193}
194
195long
196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197{
198 return -EINVAL;
199}
200
201void kvm_arch_free_memslot(struct kvm_memory_slot *free,
202 struct kvm_memory_slot *dont)
203{
204}
205
206int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
207{
208 return 0;
209}
210
211int kvm_arch_prepare_memory_region(struct kvm *kvm,
212 struct kvm_memory_slot *memslot,
213 struct kvm_userspace_memory_region *mem,
214 enum kvm_mr_change change)
215{
216 return 0;
217}
218
219void kvm_arch_commit_memory_region(struct kvm *kvm,
220 struct kvm_userspace_memory_region *mem,
221 const struct kvm_memory_slot *old,
222 enum kvm_mr_change change)
223{
224 unsigned long npages = 0;
225 int i, err = 0;
226
227 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
228 __func__, kvm, mem->slot, mem->guest_phys_addr,
229 mem->memory_size, mem->userspace_addr);
230
231 /* Setup Guest PMAP table */
232 if (!kvm->arch.guest_pmap) {
233 if (mem->slot == 0)
234 npages = mem->memory_size >> PAGE_SHIFT;
235
236 if (npages) {
237 kvm->arch.guest_pmap_npages = npages;
238 kvm->arch.guest_pmap =
239 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
240
241 if (!kvm->arch.guest_pmap) {
242 kvm_err("Failed to allocate guest PMAP");
243 err = -ENOMEM;
244 goto out;
245 }
246
247 kvm_info
248 ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
249 npages, kvm->arch.guest_pmap);
250
251 /* Now setup the page table */
252 for (i = 0; i < npages; i++) {
253 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
254 }
255 }
256 }
257out:
258 return;
259}
260
261void kvm_arch_flush_shadow_all(struct kvm *kvm)
262{
263}
264
265void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
266 struct kvm_memory_slot *slot)
267{
268}
269
270void kvm_arch_flush_shadow(struct kvm *kvm)
271{
272}
273
274struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
275{
276 extern char mips32_exception[], mips32_exceptionEnd[];
277 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
278 int err, size, offset;
279 void *gebase;
280 int i;
281
282 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
283
284 if (!vcpu) {
285 err = -ENOMEM;
286 goto out;
287 }
288
289 err = kvm_vcpu_init(vcpu, kvm, id);
290
291 if (err)
292 goto out_free_cpu;
293
294 kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
295
296 /* Allocate space for host mode exception handlers that handle
297 * guest mode exits
298 */
299 if (cpu_has_veic || cpu_has_vint) {
300 size = 0x200 + VECTORSPACING * 64;
301 } else {
302 size = 0x200;
303 }
304
305 /* Save Linux EBASE */
306 vcpu->arch.host_ebase = (void *)read_c0_ebase();
307
308 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
309
310 if (!gebase) {
311 err = -ENOMEM;
312 goto out_free_cpu;
313 }
314 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
315 ALIGN(size, PAGE_SIZE), gebase);
316
317 /* Save new ebase */
318 vcpu->arch.guest_ebase = gebase;
319
320 /* Copy L1 Guest Exception handler to correct offset */
321
322 /* TLB Refill, EXL = 0 */
323 memcpy(gebase, mips32_exception,
324 mips32_exceptionEnd - mips32_exception);
325
326 /* General Exception Entry point */
327 memcpy(gebase + 0x180, mips32_exception,
328 mips32_exceptionEnd - mips32_exception);
329
330 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
331 for (i = 0; i < 8; i++) {
332 kvm_debug("L1 Vectored handler @ %p\n",
333 gebase + 0x200 + (i * VECTORSPACING));
334 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
335 mips32_exceptionEnd - mips32_exception);
336 }
337
338 /* General handler, relocate to unmapped space for sanity's sake */
339 offset = 0x2000;
340 kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
341 gebase + offset,
342 mips32_GuestExceptionEnd - mips32_GuestException);
343
344 memcpy(gebase + offset, mips32_GuestException,
345 mips32_GuestExceptionEnd - mips32_GuestException);
346
347 /* Invalidate the icache for these ranges */
348 mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
349
350 /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
351 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
352
353 if (!vcpu->arch.kseg0_commpage) {
354 err = -ENOMEM;
355 goto out_free_gebase;
356 }
357
358 kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
359 kvm_mips_commpage_init(vcpu);
360
361 /* Init */
362 vcpu->arch.last_sched_cpu = -1;
363
364 /* Start off the timer */
365 kvm_mips_emulate_count(vcpu);
366
367 return vcpu;
368
369out_free_gebase:
370 kfree(gebase);
371
372out_free_cpu:
373 kfree(vcpu);
374
375out:
376 return ERR_PTR(err);
377}
378
379void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
380{
381 hrtimer_cancel(&vcpu->arch.comparecount_timer);
382
383 kvm_vcpu_uninit(vcpu);
384
385 kvm_mips_dump_stats(vcpu);
386
387 if (vcpu->arch.guest_ebase)
388 kfree(vcpu->arch.guest_ebase);
389
390 if (vcpu->arch.kseg0_commpage)
391 kfree(vcpu->arch.kseg0_commpage);
392
393}
394
/* Destroying a vcpu is just freeing its resources. */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
399
/* Guest debug is not implemented on MIPS; always rejected. */
int
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
				    struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
406
/*
 * Run the vcpu: complete any MMIO load finished by userspace, deliver
 * pending guest interrupts, then enter the low-level run loop with host
 * IRQs disabled and the vcpu's signal mask installed.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	/* Run under the vcpu's own signal mask; restored before returning */
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* Userspace completed an MMIO operation: fold the result back in */
	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	/* Host interrupts off while the guest executes */
	local_irq_disable();
	kvm_guest_enter();

	r = __kvm_mips_vcpu_run(run, vcpu);

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
438
439int
440kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
441{
442 int intr = (int)irq->irq;
443 struct kvm_vcpu *dvcpu = NULL;
444
445 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
446 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
447 (int)intr);
448
449 if (irq->cpu == -1)
450 dvcpu = vcpu;
451 else
452 dvcpu = vcpu->kvm->vcpus[irq->cpu];
453
454 if (intr == 2 || intr == 3 || intr == 4) {
455 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
456
457 } else if (intr == -2 || intr == -3 || intr == -4) {
458 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
459 } else {
460 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
461 irq->cpu, irq->irq);
462 return -EINVAL;
463 }
464
465 dvcpu->arch.wait = 0;
466
467 if (waitqueue_active(&dvcpu->wq)) {
468 wake_up_interruptible(&dvcpu->wq);
469 }
470
471 return 0;
472}
473
/* MP state is not implemented on MIPS; always rejected. */
int
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
480
/* MP state is not implemented on MIPS; always rejected. */
int
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
487
488long
489kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
490{
491 struct kvm_vcpu *vcpu = filp->private_data;
492 void __user *argp = (void __user *)arg;
493 long r;
494 int intr;
495
496 switch (ioctl) {
497 case KVM_NMI:
498 /* Treat the NMI as a CPU reset */
499 r = kvm_mips_reset_vcpu(vcpu);
500 break;
501 case KVM_INTERRUPT:
502 {
503 struct kvm_mips_interrupt irq;
504 r = -EFAULT;
505 if (copy_from_user(&irq, argp, sizeof(irq)))
506 goto out;
507
508 intr = (int)irq.irq;
509
510 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
511 irq.irq);
512
513 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
514 break;
515 }
516 default:
517 r = -EINVAL;
518 }
519
520out:
521 return r;
522}
523
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	/* Copies the dirty bitmap out to userspace and reports dirtiness */
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		/* NOTE(review): printk without a KERN_ level and unconditional;
		 * probably should be kvm_debug - left as-is here */
		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
		       ga_end);

		/* Reset the in-kernel bitmap now that it has been reported */
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;

}
561
562long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
563{
564 long r;
565
566 switch (ioctl) {
567 default:
568 r = -EINVAL;
569 }
570
571 return r;
572}
573
574int kvm_arch_init(void *opaque)
575{
576 int ret;
577
578 if (kvm_mips_callbacks) {
579 kvm_err("kvm: module already exists\n");
580 return -EEXIST;
581 }
582
583 ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
584
585 return ret;
586}
587
/* Module-level exit hook: drop the emulation callbacks. */
void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}
592
/* Special registers are not supported on MIPS. */
int
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}
598
/* Special registers are not supported on MIPS. */
int
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}
604
/* No post-creation work needed for a MIPS vcpu. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
609
/* FPU state access is not supported on MIPS. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
614
/* FPU state access is not supported on MIPS. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
619
/* No mmap-able vcpu regions on MIPS: any fault is an error. */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
624
625int kvm_dev_ioctl_check_extension(long ext)
626{
627 int r;
628
629 switch (ext) {
630 case KVM_CAP_COALESCED_MMIO:
631 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
632 break;
633 default:
634 r = 0;
635 break;
636 }
637 return r;
638
639}
640
/* Ask the MIPS timer emulation whether a timer interrupt is pending. */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}
645
646int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
647{
648 int i;
649 struct mips_coproc *cop0;
650
651 if (!vcpu)
652 return -1;
653
654 printk("VCPU Register Dump:\n");
655 printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
656 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
657
658 for (i = 0; i < 32; i += 4) {
659 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
660 vcpu->arch.gprs[i],
661 vcpu->arch.gprs[i + 1],
662 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
663 }
664 printk("\thi: 0x%08lx\n", vcpu->arch.hi);
665 printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
666
667 cop0 = vcpu->arch.cop0;
668 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
669 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
670
671 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
672
673 return 0;
674}
675
676int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
677{
678 int i;
679
680 for (i = 0; i < 32; i++)
681 vcpu->arch.gprs[i] = regs->gprs[i];
682
683 vcpu->arch.hi = regs->hi;
684 vcpu->arch.lo = regs->lo;
685 vcpu->arch.pc = regs->pc;
686
687 return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
688}
689
690int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
691{
692 int i;
693
694 for (i = 0; i < 32; i++)
695 regs->gprs[i] = vcpu->arch.gprs[i];
696
697 regs->hi = vcpu->arch.hi;
698 regs->lo = vcpu->arch.lo;
699 regs->pc = vcpu->arch.pc;
700
701 return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
702}
703
704void kvm_mips_comparecount_func(unsigned long data)
705{
706 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
707
708 kvm_mips_callbacks->queue_timer_int(vcpu);
709
710 vcpu->arch.wait = 0;
711 if (waitqueue_active(&vcpu->wq)) {
712 wake_up_interruptible(&vcpu->wq);
713 }
714}
715
/*
 * low level hrtimer wake routine.
 */
enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	/* Re-arm 10ms from now so the guest keeps receiving timer ticks */
	hrtimer_forward_now(&vcpu->arch.comparecount_timer,
			    ktime_set(0, MS_TO_NS(10)));
	return HRTIMER_RESTART;
}
729
/*
 * Generic vcpu init hook: backend-specific init, the 10ms compare-count
 * timer, and the shadow TLB for this vcpu.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	kvm_mips_init_shadow_tlb(vcpu);
	return 0;
}
739
/* Nothing arch-specific to tear down here. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
744
/* Address translation ioctl: nothing to do, reports success. */
int
kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
{
	return 0;
}
750
/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	/* Delegate initial register/COP0 setup to the emulation backend */
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}
756
757static
758void kvm_mips_set_c0_status(void)
759{
760 uint32_t status = read_c0_status();
761
762 if (cpu_has_fpu)
763 status |= (ST0_CU1);
764
765 if (cpu_has_dsp)
766 status |= (ST0_MX);
767
768 write_c0_status(status);
769 ehb();
770}
771
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 *
 * Central guest-exit dispatcher: called from the low-level run loop with
 * host IRQs disabled.  Performs a privilege check, then routes the exit
 * cause to the appropriate backend handler, and finally decides whether
 * to resume the guest or return to userspace.
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);

	/* Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		/* Give the host scheduler a chance to run */
		if (need_resched()) {
			cond_resched();
		}

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
			ret = RESUME_HOST;
		}
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug
		    ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
		     badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	default:
		kvm_err
		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
		     kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	/* Deliver any interrupts that became pending during emulation */
	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return ret;
}
920
/* Module entry point: register with generic KVM and hook up the TLB helpers. */
int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
	 * to avoid the possibility of double faulting. The issue is that the TLB code
	 * references routines that are part of the KVM module,
	 * which are only available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
	return 0;
}
943
/* Module exit point: unregister from KVM and clear the TLB helper hooks. */
void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");
}
954
/* Module entry/exit registration */
module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644
index 000000000000..a4a8c85cc8f7
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -0,0 +1,23 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
* KVM/MIPS: commpage: mapped into guest kernel space
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#ifndef __KVM_MIPS_COMMPAGE_H__
13#define __KVM_MIPS_COMMPAGE_H__
14
/* Layout of the commpage shared with the guest kernel. */
struct kvm_mips_commpage {
	struct mips_coproc cop0;	/* COP0 state is mapped into Guest kernel via commpage */
};
18
19#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
20
21extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
22
23#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644
index 000000000000..3873b1ecc40f
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -0,0 +1,37 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* commpage, currently used for Virtual COP0 registers.
7* Mapped into the guest kernel @ 0x0.
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <asm/page.h>
20#include <asm/cacheflush.h>
21#include <asm/mmu_context.h>
22
23#include <linux/kvm_host.h>
24
25#include "kvm_mips_comm.h"
26
27void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
28{
29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
30 memset(page, 0, sizeof(struct kvm_mips_commpage));
31
32 /* Specific init values for fields */
33 vcpu->arch.cop0 = &page->cop0;
34 memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
35
36 return;
37}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644
index 000000000000..96528e2d1ea6
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -0,0 +1,149 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19
20#include "kvm_mips_comm.h"
21
/* Instruction templates used when binary-patching guest code.
 * SYNCI_BASE/SYNCI_OFFSET extract the base-register and offset fields. */
#define SYNCI_TEMPLATE  0x041f0000
#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
/* Fix vs. original: the macro referenced 'x' but declared no parameter,
 * so any use of SYNCI_OFFSET would have failed to compile. */
#define SYNCI_OFFSET(x) ((x) & 0xffff)

#define LW_TEMPLATE     0x8c000000
#define CLEAR_TEMPLATE  0x00000020
#define SW_TEMPLATE     0xac000000
29
/*
 * Patch an index-type CACHE instruction in guest KSEG0 with a NOP
 * (index ops on the guest cache have no effect under virtualisation).
 * 'inst' is currently unused.  Returns 0.
 */
int
kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
			   struct kvm_vcpu *vcpu)
{
	int result = 0;
	unsigned long kseg0_opc;
	uint32_t synci_inst = 0x0;	/* encoding 0 == NOP */

	/* Replace the CACHE instruction, with a NOP */
	kseg0_opc =
	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
		       (vcpu, (unsigned long) opc));
	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
	/* Flush the patched line out of the I-cache */
	mips32_SyncICache(kseg0_opc, 32);

	return result;
}
47
48/*
49 * Address based CACHE instructions are transformed into synci(s). A little heavy
50 * for just D-cache invalidates, but avoids an expensive trap
51 */
52int
53kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
54 struct kvm_vcpu *vcpu)
55{
56 int result = 0;
57 unsigned long kseg0_opc;
58 uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
59
60 base = (inst >> 21) & 0x1f;
61 offset = inst & 0xffff;
62 synci_inst |= (base << 21);
63 synci_inst |= offset;
64
65 kseg0_opc =
66 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
67 (vcpu, (unsigned long) opc));
68 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
69 mips32_SyncICache(kseg0_opc, 32);
70
71 return result;
72}
73
/*
 * Patch a trapping MFC0 with a direct load of the register's value from
 * the commpage copy of the guest COP0 state (or a register clear for
 * ERRCTL reads).  Returns 0 on success, -EFAULT for an address outside
 * guest KSEG0/KSEG23.
 */
int
kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
	int32_t rt, rd, sel;
	uint32_t mfc0_inst;
	unsigned long kseg0_opc, flags;

	/* Decode destination register and COP0 register/select fields */
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;

	if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
		/* ERRCTL reads: emit an instruction that just clears rt */
		mfc0_inst = CLEAR_TEMPLATE;
		mfc0_inst |= ((rt & 0x1f) << 16);
	} else {
		/* LW rt, <offset of reg[rd][sel] within the commpage> */
		mfc0_inst = LW_TEMPLATE;
		mfc0_inst |= ((rt & 0x1f) << 16);
		mfc0_inst |=
		    offsetof(struct mips_coproc,
			     reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
						      cop0);
	}

	/* NOTE(review): the first KSEGX check passes opc without the
	 * (unsigned long) cast used in the second - presumably equivalent;
	 * confirm against KVM_GUEST_KSEGX's definition. */
	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		kseg0_opc =
		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
			       (vcpu, (unsigned long) opc));
		memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
		mips32_SyncICache(kseg0_opc, 32);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		/* Patch in place with IRQs off so the write+flush is atomic
		 * with respect to this CPU */
		local_irq_save(flags);
		memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
		mips32_SyncICache((unsigned long) opc, 32);
		local_irq_restore(flags);
	} else {
		kvm_err("%s: Invalid address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}
115
/*
 * Patch a trapping MTC0 with a direct store of rt into the commpage copy
 * of the guest COP0 register.  Returns 0 on success, -EFAULT for an
 * address outside guest KSEG0/KSEG23.
 */
int
kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
	int32_t rt, rd, sel;
	uint32_t mtc0_inst = SW_TEMPLATE;
	unsigned long kseg0_opc, flags;

	/* Decode source register and COP0 register/select fields */
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;

	/* SW rt, <offset of reg[rd][sel] within the commpage> */
	mtc0_inst |= ((rt & 0x1f) << 16);
	mtc0_inst |=
	    offsetof(struct mips_coproc,
		     reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);

	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		kseg0_opc =
		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
			       (vcpu, (unsigned long) opc));
		memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
		mips32_SyncICache(kseg0_opc, 32);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		/* Patch in place with IRQs off so the write+flush is atomic
		 * with respect to this CPU */
		local_irq_save(flags);
		memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
		mips32_SyncICache((unsigned long) opc, 32);
		local_irq_restore(flags);
	} else {
		kvm_err("%s: Invalid address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644
index 000000000000..2b2bac9a40aa
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -0,0 +1,1826 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Instruction/Exception emulation
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <linux/random.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/cpu-info.h>
23#include <asm/mmu_context.h>
24#include <asm/tlbflush.h>
25#include <asm/inst.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
31#include "kvm_mips_opcode.h"
32#include "kvm_mips_int.h"
33#include "kvm_mips_comm.h"
34
35#include "trace.h"
36
/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should be called only in branch delay slot active.
 *
 * Returns the PC the guest should resume at after the branch, or
 * KVM_INVALID_INST when the branch instruction cannot be read, the PC is
 * unaligned, or the opcode is unsupported.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
	unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/*
	 * Read the instruction
	 */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/*
		 * jr and jalr are in r_format format.
		 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			/* Link register is set regardless of branch outcome */
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			} else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/*
		 * These are unconditional and in j_format.
		 */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		/* fall through to the common jump target computation */
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/*
		 * These are conditional and in i_format.
		 */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/*
		 * And now the FPA/cp1 branch instructions.
		 */
	case cop1_op:
		printk("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
209
210enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
211{
212 unsigned long branch_pc;
213 enum emulation_result er = EMULATE_DONE;
214
215 if (cause & CAUSEF_BD) {
216 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
217 if (branch_pc == KVM_INVALID_INST) {
218 er = EMULATE_FAIL;
219 } else {
220 vcpu->arch.pc = branch_pc;
221 kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
222 }
223 } else
224 vcpu->arch.pc += 4;
225
226 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
227
228 return er;
229}
230
/* Everytime the compare register is written to, we need to decide when to fire
 * the timer that represents timer ticks to the GUEST.
 *
 */
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	/* If COUNT is enabled */
	if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
		/* Restart the 10ms tick timer */
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
		hrtimer_start(&vcpu->arch.comparecount_timer,
			      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
	} else {
		/* COUNT disabled (DC set): stop ticking */
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
	}

	return er;
}
251
/*
 * Emulate a guest ERET: return from exception (EXL) or error (ERL),
 * restoring the PC from EPC/ErrorEPC respectively.  ERET with neither
 * bit set is an emulation failure.
 */
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
		       vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}
274
/*
 * Emulate the guest WAIT instruction: block the vcpu until an event is
 * pending, signalling an IRQ-window exit to userspace if unhalted.
 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/* If we are runnable, then definitely go off to user space to check if any
		 * I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return er;
}
299
/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
 * this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return er;
}
312
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk
		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		     pc, index, kvm_read_c0_guest_entryhi(cop0),
		     kvm_read_c0_guest_entrylo0(cop0),
		     kvm_read_c0_guest_entrylo1(cop0),
		     kvm_read_c0_guest_pagemask(cop0));
		/* Clamp a bogus index into the valid range rather than fail */
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
#if 1
	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

	/* Copy the guest's COP0 TLB registers into the selected entry */
	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
	     kvm_read_c0_guest_pagemask(cop0));

	return er;
}
352
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	/* Pick a random victim entry (KVM_MIPS_GUEST_TLB_SIZE is a power
	 * of two, so the mask keeps it in range) */
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];

#if 1
	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

	/* Copy the guest's COP0 TLB registers into the victim entry */
	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0),
	     kvm_read_c0_guest_entrylo1(cop0));

	return er;
}
394
395enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
396{
397 struct mips_coproc *cop0 = vcpu->arch.cop0;
398 long entryhi = kvm_read_c0_guest_entryhi(cop0);
399 enum emulation_result er = EMULATE_DONE;
400 uint32_t pc = vcpu->arch.pc;
401 int index = -1;
402
403 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
404
405 kvm_write_c0_guest_index(cop0, index);
406
407 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
408 index);
409
410 return er;
411}
412
413enum emulation_result
414kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
415 struct kvm_run *run, struct kvm_vcpu *vcpu)
416{
417 struct mips_coproc *cop0 = vcpu->arch.cop0;
418 enum emulation_result er = EMULATE_DONE;
419 int32_t rt, rd, copz, sel, co_bit, op;
420 uint32_t pc = vcpu->arch.pc;
421 unsigned long curr_pc;
422
423 /*
424 * Update PC and hold onto current PC in case there is
425 * an error and we want to rollback the PC
426 */
427 curr_pc = vcpu->arch.pc;
428 er = update_pc(vcpu, cause);
429 if (er == EMULATE_FAIL) {
430 return er;
431 }
432
433 copz = (inst >> 21) & 0x1f;
434 rt = (inst >> 16) & 0x1f;
435 rd = (inst >> 11) & 0x1f;
436 sel = inst & 0x7;
437 co_bit = (inst >> 25) & 1;
438
439 /* Verify that the register is valid */
440 if (rd > MIPS_CP0_DESAVE) {
441 printk("Invalid rd: %d\n", rd);
442 er = EMULATE_FAIL;
443 goto done;
444 }
445
446 if (co_bit) {
447 op = (inst) & 0xff;
448
449 switch (op) {
450 case tlbr_op: /* Read indexed TLB entry */
451 er = kvm_mips_emul_tlbr(vcpu);
452 break;
453 case tlbwi_op: /* Write indexed */
454 er = kvm_mips_emul_tlbwi(vcpu);
455 break;
456 case tlbwr_op: /* Write random */
457 er = kvm_mips_emul_tlbwr(vcpu);
458 break;
459 case tlbp_op: /* TLB Probe */
460 er = kvm_mips_emul_tlbp(vcpu);
461 break;
462 case rfe_op:
463 printk("!!!COP0_RFE!!!\n");
464 break;
465 case eret_op:
466 er = kvm_mips_emul_eret(vcpu);
467 goto dont_update_pc;
468 break;
469 case wait_op:
470 er = kvm_mips_emul_wait(vcpu);
471 break;
472 }
473 } else {
474 switch (copz) {
475 case mfc_op:
476#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
477 cop0->stat[rd][sel]++;
478#endif
479 /* Get reg */
480 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
481 /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
482 vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
483 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
484 vcpu->arch.gprs[rt] = 0x0;
485#ifdef CONFIG_KVM_MIPS_DYN_TRANS
486 kvm_mips_trans_mfc0(inst, opc, vcpu);
487#endif
488 }
489 else {
490 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
491
492#ifdef CONFIG_KVM_MIPS_DYN_TRANS
493 kvm_mips_trans_mfc0(inst, opc, vcpu);
494#endif
495 }
496
497 kvm_debug
498 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
499 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
500
501 break;
502
503 case dmfc_op:
504 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
505 break;
506
507 case mtc_op:
508#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
509 cop0->stat[rd][sel]++;
510#endif
511 if ((rd == MIPS_CP0_TLB_INDEX)
512 && (vcpu->arch.gprs[rt] >=
513 KVM_MIPS_GUEST_TLB_SIZE)) {
514 printk("Invalid TLB Index: %ld",
515 vcpu->arch.gprs[rt]);
516 er = EMULATE_FAIL;
517 break;
518 }
519#define C0_EBASE_CORE_MASK 0xff
520 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
521 /* Preserve CORE number */
522 kvm_change_c0_guest_ebase(cop0,
523 ~(C0_EBASE_CORE_MASK),
524 vcpu->arch.gprs[rt]);
525 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
526 kvm_read_c0_guest_ebase(cop0));
527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
528 uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
529 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
530 &&
531 (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
532 != nasid)) {
533
534 kvm_debug
535 ("MTCz, change ASID from %#lx to %#lx\n",
536 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
537 ASID_MASK(vcpu->arch.gprs[rt]));
538
539 /* Blow away the shadow host TLBs */
540 kvm_mips_flush_host_tlb(1);
541 }
542 kvm_write_c0_guest_entryhi(cop0,
543 vcpu->arch.gprs[rt]);
544 }
545 /* Are we writing to COUNT */
546 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
547 /* Linux doesn't seem to write into COUNT, we throw an error
548 * if we notice a write to COUNT
549 */
550 /*er = EMULATE_FAIL; */
551 goto done;
552 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
553 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
554 pc, kvm_read_c0_guest_compare(cop0),
555 vcpu->arch.gprs[rt]);
556
557 /* If we are writing to COMPARE */
558 /* Clear pending timer interrupt, if any */
559 kvm_mips_callbacks->dequeue_timer_int(vcpu);
560 kvm_write_c0_guest_compare(cop0,
561 vcpu->arch.gprs[rt]);
562 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
563 kvm_write_c0_guest_status(cop0,
564 vcpu->arch.gprs[rt]);
565 /* Make sure that CU1 and NMI bits are never set */
566 kvm_clear_c0_guest_status(cop0,
567 (ST0_CU1 | ST0_NMI));
568
569#ifdef CONFIG_KVM_MIPS_DYN_TRANS
570 kvm_mips_trans_mtc0(inst, opc, vcpu);
571#endif
572 } else {
573 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
574#ifdef CONFIG_KVM_MIPS_DYN_TRANS
575 kvm_mips_trans_mtc0(inst, opc, vcpu);
576#endif
577 }
578
579 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
580 rd, sel, cop0->reg[rd][sel]);
581 break;
582
583 case dmtc_op:
584 printk
585 ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
586 vcpu->arch.pc, rt, rd, sel);
587 er = EMULATE_FAIL;
588 break;
589
590 case mfmcz_op:
591#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
592 cop0->stat[MIPS_CP0_STATUS][0]++;
593#endif
594 if (rt != 0) {
595 vcpu->arch.gprs[rt] =
596 kvm_read_c0_guest_status(cop0);
597 }
598 /* EI */
599 if (inst & 0x20) {
600 kvm_debug("[%#lx] mfmcz_op: EI\n",
601 vcpu->arch.pc);
602 kvm_set_c0_guest_status(cop0, ST0_IE);
603 } else {
604 kvm_debug("[%#lx] mfmcz_op: DI\n",
605 vcpu->arch.pc);
606 kvm_clear_c0_guest_status(cop0, ST0_IE);
607 }
608
609 break;
610
611 case wrpgpr_op:
612 {
613 uint32_t css =
614 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
615 uint32_t pss =
616 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
617 /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
618 if (css || pss) {
619 er = EMULATE_FAIL;
620 break;
621 }
622 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
623 vcpu->arch.gprs[rt]);
624 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
625 }
626 break;
627 default:
628 printk
629 ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
630 vcpu->arch.pc, copz);
631 er = EMULATE_FAIL;
632 break;
633 }
634 }
635
636done:
637 /*
638 * Rollback PC only if emulation was unsuccessful
639 */
640 if (er == EMULATE_FAIL) {
641 vcpu->arch.pc = curr_pc;
642 }
643
644dont_update_pc:
645 /*
646 * This is for special instructions whose emulation
647 * updates the PC, so do not overwrite the PC under
648 * any circumstances
649 */
650
651 return er;
652}
653
654enum emulation_result
655kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
656 struct kvm_run *run, struct kvm_vcpu *vcpu)
657{
658 enum emulation_result er = EMULATE_DO_MMIO;
659 int32_t op, base, rt, offset;
660 uint32_t bytes;
661 void *data = run->mmio.data;
662 unsigned long curr_pc;
663
664 /*
665 * Update PC and hold onto current PC in case there is
666 * an error and we want to rollback the PC
667 */
668 curr_pc = vcpu->arch.pc;
669 er = update_pc(vcpu, cause);
670 if (er == EMULATE_FAIL)
671 return er;
672
673 rt = (inst >> 16) & 0x1f;
674 base = (inst >> 21) & 0x1f;
675 offset = inst & 0xffff;
676 op = (inst >> 26) & 0x3f;
677
678 switch (op) {
679 case sb_op:
680 bytes = 1;
681 if (bytes > sizeof(run->mmio.data)) {
682 kvm_err("%s: bad MMIO length: %d\n", __func__,
683 run->mmio.len);
684 }
685 run->mmio.phys_addr =
686 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
687 host_cp0_badvaddr);
688 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
689 er = EMULATE_FAIL;
690 break;
691 }
692 run->mmio.len = bytes;
693 run->mmio.is_write = 1;
694 vcpu->mmio_needed = 1;
695 vcpu->mmio_is_write = 1;
696 *(u8 *) data = vcpu->arch.gprs[rt];
697 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
698 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
699 *(uint8_t *) data);
700
701 break;
702
703 case sw_op:
704 bytes = 4;
705 if (bytes > sizeof(run->mmio.data)) {
706 kvm_err("%s: bad MMIO length: %d\n", __func__,
707 run->mmio.len);
708 }
709 run->mmio.phys_addr =
710 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
711 host_cp0_badvaddr);
712 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
713 er = EMULATE_FAIL;
714 break;
715 }
716
717 run->mmio.len = bytes;
718 run->mmio.is_write = 1;
719 vcpu->mmio_needed = 1;
720 vcpu->mmio_is_write = 1;
721 *(uint32_t *) data = vcpu->arch.gprs[rt];
722
723 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
724 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
725 vcpu->arch.gprs[rt], *(uint32_t *) data);
726 break;
727
728 case sh_op:
729 bytes = 2;
730 if (bytes > sizeof(run->mmio.data)) {
731 kvm_err("%s: bad MMIO length: %d\n", __func__,
732 run->mmio.len);
733 }
734 run->mmio.phys_addr =
735 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
736 host_cp0_badvaddr);
737 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
738 er = EMULATE_FAIL;
739 break;
740 }
741
742 run->mmio.len = bytes;
743 run->mmio.is_write = 1;
744 vcpu->mmio_needed = 1;
745 vcpu->mmio_is_write = 1;
746 *(uint16_t *) data = vcpu->arch.gprs[rt];
747
748 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
749 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
750 vcpu->arch.gprs[rt], *(uint32_t *) data);
751 break;
752
753 default:
754 printk("Store not yet supported");
755 er = EMULATE_FAIL;
756 break;
757 }
758
759 /*
760 * Rollback PC if emulation was unsuccessful
761 */
762 if (er == EMULATE_FAIL) {
763 vcpu->arch.pc = curr_pc;
764 }
765
766 return er;
767}
768
769enum emulation_result
770kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
771 struct kvm_run *run, struct kvm_vcpu *vcpu)
772{
773 enum emulation_result er = EMULATE_DO_MMIO;
774 int32_t op, base, rt, offset;
775 uint32_t bytes;
776
777 rt = (inst >> 16) & 0x1f;
778 base = (inst >> 21) & 0x1f;
779 offset = inst & 0xffff;
780 op = (inst >> 26) & 0x3f;
781
782 vcpu->arch.pending_load_cause = cause;
783 vcpu->arch.io_gpr = rt;
784
785 switch (op) {
786 case lw_op:
787 bytes = 4;
788 if (bytes > sizeof(run->mmio.data)) {
789 kvm_err("%s: bad MMIO length: %d\n", __func__,
790 run->mmio.len);
791 er = EMULATE_FAIL;
792 break;
793 }
794 run->mmio.phys_addr =
795 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
796 host_cp0_badvaddr);
797 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
798 er = EMULATE_FAIL;
799 break;
800 }
801
802 run->mmio.len = bytes;
803 run->mmio.is_write = 0;
804 vcpu->mmio_needed = 1;
805 vcpu->mmio_is_write = 0;
806 break;
807
808 case lh_op:
809 case lhu_op:
810 bytes = 2;
811 if (bytes > sizeof(run->mmio.data)) {
812 kvm_err("%s: bad MMIO length: %d\n", __func__,
813 run->mmio.len);
814 er = EMULATE_FAIL;
815 break;
816 }
817 run->mmio.phys_addr =
818 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
819 host_cp0_badvaddr);
820 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
821 er = EMULATE_FAIL;
822 break;
823 }
824
825 run->mmio.len = bytes;
826 run->mmio.is_write = 0;
827 vcpu->mmio_needed = 1;
828 vcpu->mmio_is_write = 0;
829
830 if (op == lh_op)
831 vcpu->mmio_needed = 2;
832 else
833 vcpu->mmio_needed = 1;
834
835 break;
836
837 case lbu_op:
838 case lb_op:
839 bytes = 1;
840 if (bytes > sizeof(run->mmio.data)) {
841 kvm_err("%s: bad MMIO length: %d\n", __func__,
842 run->mmio.len);
843 er = EMULATE_FAIL;
844 break;
845 }
846 run->mmio.phys_addr =
847 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
848 host_cp0_badvaddr);
849 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
850 er = EMULATE_FAIL;
851 break;
852 }
853
854 run->mmio.len = bytes;
855 run->mmio.is_write = 0;
856 vcpu->mmio_is_write = 0;
857
858 if (op == lb_op)
859 vcpu->mmio_needed = 2;
860 else
861 vcpu->mmio_needed = 1;
862
863 break;
864
865 default:
866 printk("Load not yet supported");
867 er = EMULATE_FAIL;
868 break;
869 }
870
871 return er;
872}
873
874int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
875{
876 unsigned long offset = (va & ~PAGE_MASK);
877 struct kvm *kvm = vcpu->kvm;
878 unsigned long pa;
879 gfn_t gfn;
880 pfn_t pfn;
881
882 gfn = va >> PAGE_SHIFT;
883
884 if (gfn >= kvm->arch.guest_pmap_npages) {
885 printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
886 kvm_mips_dump_host_tlbs();
887 kvm_arch_vcpu_dump_regs(vcpu);
888 return -1;
889 }
890 pfn = kvm->arch.guest_pmap[gfn];
891 pa = (pfn << PAGE_SHIFT) | offset;
892
893 printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
894
895 mips32_SyncICache(CKSEG0ADDR(pa), 32);
896 return 0;
897}
898
/*
 * CACHE instruction "op" field encodings (bits [20:18] of the
 * instruction word), as decoded in kvm_mips_emulate_cache() below.
 */
#define MIPS_CACHE_OP_INDEX_INV 0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
#define MIPS_CACHE_OP_IMP 0x3
#define MIPS_CACHE_OP_HIT_INV 0x4
#define MIPS_CACHE_OP_FILL_WB_INV 0x5
#define MIPS_CACHE_OP_HIT_HB 0x6
#define MIPS_CACHE_OP_FETCH_LOCK 0x7

/* CACHE instruction target-cache encodings (bits [17:16]) */
#define MIPS_CACHE_ICACHE 0x0
#define MIPS_CACHE_DCACHE 0x1
#define MIPS_CACHE_SEC 0x3
911
/*
 * Emulate a trapped CACHE instruction on behalf of the guest.
 *
 * INDEX_INV ops are treated as a whole-cache blast (Linux issues them at
 * boot to invalidate the caches by stepping all ways/indexes).  Hit-type
 * ops first make sure the target line's page is present in the host TLB
 * (faulting it in from the guest TLB, or delivering a guest TLB
 * miss/invalid exception if it isn't mapped there either), then flush
 * the line.  Only the subset of ops Linux actually uses is supported.
 */
enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Decode the CACHE instruction fields */
	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate
	 * the caches entirely by stepping through all the ways/indexes
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug
		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
		     arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	/* Hit-type op: make sure the target page is mapped in the host TLB */
	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
			goto skip_fault;
		}

		/* If address not in the guest TLB, then give the guest a fault, the
		 * resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/* We fault an entry from the guest tlb to the shadow host TLB */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		printk
		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;

	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		printk
		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

	/*
	 * NOTE(review): the successful hit-op flush path above also falls
	 * through into dont_update_pc, unconditionally rolling back the PC
	 * even when er == EMULATE_DONE — presumably relying on re-execution
	 * (or dynamic translation) of the CACHE instruction; confirm this
	 * is intended.
	 */
 dont_update_pc:
	/*
	 * Rollback PC
	 */
	vcpu->arch.pc = curr_pc;
 done:
	return er;
}
1061
1062enum emulation_result
1063kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1064 struct kvm_run *run, struct kvm_vcpu *vcpu)
1065{
1066 enum emulation_result er = EMULATE_DONE;
1067 uint32_t inst;
1068
1069 /*
1070 * Fetch the instruction.
1071 */
1072 if (cause & CAUSEF_BD) {
1073 opc += 1;
1074 }
1075
1076 inst = kvm_get_inst(opc, vcpu);
1077
1078 switch (((union mips_instruction)inst).r_format.opcode) {
1079 case cop0_op:
1080 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1081 break;
1082 case sb_op:
1083 case sh_op:
1084 case sw_op:
1085 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1086 break;
1087 case lb_op:
1088 case lbu_op:
1089 case lhu_op:
1090 case lh_op:
1091 case lw_op:
1092 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1093 break;
1094
1095 case cache_op:
1096 ++vcpu->stat.cache_exits;
1097 trace_kvm_exit(vcpu, CACHE_EXITS);
1098 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1099 break;
1100
1101 default:
1102 printk("Instruction emulation not supported (%p/%#x)\n", opc,
1103 inst);
1104 kvm_arch_vcpu_dump_regs(vcpu);
1105 er = EMULATE_FAIL;
1106 break;
1107 }
1108
1109 return er;
1110}
1111
1112enum emulation_result
1113kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1114 struct kvm_run *run, struct kvm_vcpu *vcpu)
1115{
1116 struct mips_coproc *cop0 = vcpu->arch.cop0;
1117 struct kvm_vcpu_arch *arch = &vcpu->arch;
1118 enum emulation_result er = EMULATE_DONE;
1119
1120 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1121 /* save old pc */
1122 kvm_write_c0_guest_epc(cop0, arch->pc);
1123 kvm_set_c0_guest_status(cop0, ST0_EXL);
1124
1125 if (cause & CAUSEF_BD)
1126 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1127 else
1128 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1129
1130 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1131
1132 kvm_change_c0_guest_cause(cop0, (0xff),
1133 (T_SYSCALL << CAUSEB_EXCCODE));
1134
1135 /* Set PC to the exception entry point */
1136 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1137
1138 } else {
1139 printk("Trying to deliver SYSCALL when EXL is already set\n");
1140 er = EMULATE_FAIL;
1141 }
1142
1143 return er;
1144}
1145
1146enum emulation_result
1147kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1148 struct kvm_run *run, struct kvm_vcpu *vcpu)
1149{
1150 struct mips_coproc *cop0 = vcpu->arch.cop0;
1151 struct kvm_vcpu_arch *arch = &vcpu->arch;
1152 enum emulation_result er = EMULATE_DONE;
1153 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1154 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1155
1156 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1157 /* save old pc */
1158 kvm_write_c0_guest_epc(cop0, arch->pc);
1159 kvm_set_c0_guest_status(cop0, ST0_EXL);
1160
1161 if (cause & CAUSEF_BD)
1162 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1163 else
1164 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1165
1166 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1167 arch->pc);
1168
1169 /* set pc to the exception entry point */
1170 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1171
1172 } else {
1173 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1174 arch->pc);
1175
1176 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1177 }
1178
1179 kvm_change_c0_guest_cause(cop0, (0xff),
1180 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1181
1182 /* setup badvaddr, context and entryhi registers for the guest */
1183 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1184 /* XXXKYMA: is the context register used by linux??? */
1185 kvm_write_c0_guest_entryhi(cop0, entryhi);
1186 /* Blow away the shadow host TLBs */
1187 kvm_mips_flush_host_tlb(1);
1188
1189 return er;
1190}
1191
1192enum emulation_result
1193kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1194 struct kvm_run *run, struct kvm_vcpu *vcpu)
1195{
1196 struct mips_coproc *cop0 = vcpu->arch.cop0;
1197 struct kvm_vcpu_arch *arch = &vcpu->arch;
1198 enum emulation_result er = EMULATE_DONE;
1199 unsigned long entryhi =
1200 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1201 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1202
1203 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1204 /* save old pc */
1205 kvm_write_c0_guest_epc(cop0, arch->pc);
1206 kvm_set_c0_guest_status(cop0, ST0_EXL);
1207
1208 if (cause & CAUSEF_BD)
1209 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1210 else
1211 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1212
1213 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1214 arch->pc);
1215
1216 /* set pc to the exception entry point */
1217 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1218
1219 } else {
1220 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1221 arch->pc);
1222 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1223 }
1224
1225 kvm_change_c0_guest_cause(cop0, (0xff),
1226 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1227
1228 /* setup badvaddr, context and entryhi registers for the guest */
1229 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1230 /* XXXKYMA: is the context register used by linux??? */
1231 kvm_write_c0_guest_entryhi(cop0, entryhi);
1232 /* Blow away the shadow host TLBs */
1233 kvm_mips_flush_host_tlb(1);
1234
1235 return er;
1236}
1237
1238enum emulation_result
1239kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1240 struct kvm_run *run, struct kvm_vcpu *vcpu)
1241{
1242 struct mips_coproc *cop0 = vcpu->arch.cop0;
1243 struct kvm_vcpu_arch *arch = &vcpu->arch;
1244 enum emulation_result er = EMULATE_DONE;
1245 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1246 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1247
1248 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1249 /* save old pc */
1250 kvm_write_c0_guest_epc(cop0, arch->pc);
1251 kvm_set_c0_guest_status(cop0, ST0_EXL);
1252
1253 if (cause & CAUSEF_BD)
1254 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1255 else
1256 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1257
1258 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1259 arch->pc);
1260
1261 /* Set PC to the exception entry point */
1262 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1263 } else {
1264 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1265 arch->pc);
1266 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1267 }
1268
1269 kvm_change_c0_guest_cause(cop0, (0xff),
1270 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1271
1272 /* setup badvaddr, context and entryhi registers for the guest */
1273 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1274 /* XXXKYMA: is the context register used by linux??? */
1275 kvm_write_c0_guest_entryhi(cop0, entryhi);
1276 /* Blow away the shadow host TLBs */
1277 kvm_mips_flush_host_tlb(1);
1278
1279 return er;
1280}
1281
1282enum emulation_result
1283kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1284 struct kvm_run *run, struct kvm_vcpu *vcpu)
1285{
1286 struct mips_coproc *cop0 = vcpu->arch.cop0;
1287 struct kvm_vcpu_arch *arch = &vcpu->arch;
1288 enum emulation_result er = EMULATE_DONE;
1289 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1290 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1291
1292 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1293 /* save old pc */
1294 kvm_write_c0_guest_epc(cop0, arch->pc);
1295 kvm_set_c0_guest_status(cop0, ST0_EXL);
1296
1297 if (cause & CAUSEF_BD)
1298 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1299 else
1300 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1301
1302 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1303 arch->pc);
1304
1305 /* Set PC to the exception entry point */
1306 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1307 } else {
1308 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1309 arch->pc);
1310 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1311 }
1312
1313 kvm_change_c0_guest_cause(cop0, (0xff),
1314 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1315
1316 /* setup badvaddr, context and entryhi registers for the guest */
1317 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1318 /* XXXKYMA: is the context register used by linux??? */
1319 kvm_write_c0_guest_entryhi(cop0, entryhi);
1320 /* Blow away the shadow host TLBs */
1321 kvm_mips_flush_host_tlb(1);
1322
1323 return er;
1324}
1325
1326/* TLBMOD: store into address matching TLB with Dirty bit off */
1327enum emulation_result
1328kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1329 struct kvm_run *run, struct kvm_vcpu *vcpu)
1330{
1331 enum emulation_result er = EMULATE_DONE;
1332
1333#ifdef DEBUG
1334 /*
1335 * If address not in the guest TLB, then we are in trouble
1336 */
1337 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1338 if (index < 0) {
1339 /* XXXKYMA Invalidate and retry */
1340 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1341 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1342 __func__, entryhi);
1343 kvm_mips_dump_guest_tlbs(vcpu);
1344 kvm_mips_dump_host_tlbs();
1345 return EMULATE_FAIL;
1346 }
1347#endif
1348
1349 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1350 return er;
1351}
1352
1353enum emulation_result
1354kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1355 struct kvm_run *run, struct kvm_vcpu *vcpu)
1356{
1357 struct mips_coproc *cop0 = vcpu->arch.cop0;
1358 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1359 ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
1360 struct kvm_vcpu_arch *arch = &vcpu->arch;
1361 enum emulation_result er = EMULATE_DONE;
1362
1363 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1364 /* save old pc */
1365 kvm_write_c0_guest_epc(cop0, arch->pc);
1366 kvm_set_c0_guest_status(cop0, ST0_EXL);
1367
1368 if (cause & CAUSEF_BD)
1369 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1370 else
1371 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1372
1373 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1374 arch->pc);
1375
1376 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1377 } else {
1378 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1379 arch->pc);
1380 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1381 }
1382
1383 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1384
1385 /* setup badvaddr, context and entryhi registers for the guest */
1386 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1387 /* XXXKYMA: is the context register used by linux??? */
1388 kvm_write_c0_guest_entryhi(cop0, entryhi);
1389 /* Blow away the shadow host TLBs */
1390 kvm_mips_flush_host_tlb(1);
1391
1392 return er;
1393}
1394
1395enum emulation_result
1396kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1397 struct kvm_run *run, struct kvm_vcpu *vcpu)
1398{
1399 struct mips_coproc *cop0 = vcpu->arch.cop0;
1400 struct kvm_vcpu_arch *arch = &vcpu->arch;
1401 enum emulation_result er = EMULATE_DONE;
1402
1403 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1404 /* save old pc */
1405 kvm_write_c0_guest_epc(cop0, arch->pc);
1406 kvm_set_c0_guest_status(cop0, ST0_EXL);
1407
1408 if (cause & CAUSEF_BD)
1409 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1410 else
1411 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1412
1413 }
1414
1415 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1416
1417 kvm_change_c0_guest_cause(cop0, (0xff),
1418 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1419 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1420
1421 return er;
1422}
1423
1424enum emulation_result
1425kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1426 struct kvm_run *run, struct kvm_vcpu *vcpu)
1427{
1428 struct mips_coproc *cop0 = vcpu->arch.cop0;
1429 struct kvm_vcpu_arch *arch = &vcpu->arch;
1430 enum emulation_result er = EMULATE_DONE;
1431
1432 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1433 /* save old pc */
1434 kvm_write_c0_guest_epc(cop0, arch->pc);
1435 kvm_set_c0_guest_status(cop0, ST0_EXL);
1436
1437 if (cause & CAUSEF_BD)
1438 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1439 else
1440 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1441
1442 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1443
1444 kvm_change_c0_guest_cause(cop0, (0xff),
1445 (T_RES_INST << CAUSEB_EXCCODE));
1446
1447 /* Set PC to the exception entry point */
1448 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1449
1450 } else {
1451 kvm_err("Trying to deliver RI when EXL is already set\n");
1452 er = EMULATE_FAIL;
1453 }
1454
1455 return er;
1456}
1457
1458enum emulation_result
1459kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1460 struct kvm_run *run, struct kvm_vcpu *vcpu)
1461{
1462 struct mips_coproc *cop0 = vcpu->arch.cop0;
1463 struct kvm_vcpu_arch *arch = &vcpu->arch;
1464 enum emulation_result er = EMULATE_DONE;
1465
1466 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1467 /* save old pc */
1468 kvm_write_c0_guest_epc(cop0, arch->pc);
1469 kvm_set_c0_guest_status(cop0, ST0_EXL);
1470
1471 if (cause & CAUSEF_BD)
1472 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1473 else
1474 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1475
1476 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1477
1478 kvm_change_c0_guest_cause(cop0, (0xff),
1479 (T_BREAK << CAUSEB_EXCCODE));
1480
1481 /* Set PC to the exception entry point */
1482 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1483
1484 } else {
1485 printk("Trying to deliver BP when EXL is already set\n");
1486 er = EMULATE_FAIL;
1487 }
1488
1489 return er;
1490}
1491
/*
 * ll/sc, rdhwr, sync emulation
 */

/*
 * Field masks and encodings used to decode LL/SC/SYNC/RDHWR in
 * kvm_mips_handle_ri() below.
 */
#define OPCODE 0xfc000000
#define BASE 0x03e00000
#define RT 0x001f0000
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
#define SPEC0 0x00000000
#define SPEC3 0x7c000000
#define RD 0x0000f800
#define FUNC 0x0000003f
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
1508
1509enum emulation_result
1510kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1511 struct kvm_run *run, struct kvm_vcpu *vcpu)
1512{
1513 struct mips_coproc *cop0 = vcpu->arch.cop0;
1514 struct kvm_vcpu_arch *arch = &vcpu->arch;
1515 enum emulation_result er = EMULATE_DONE;
1516 unsigned long curr_pc;
1517 uint32_t inst;
1518
1519 /*
1520 * Update PC and hold onto current PC in case there is
1521 * an error and we want to rollback the PC
1522 */
1523 curr_pc = vcpu->arch.pc;
1524 er = update_pc(vcpu, cause);
1525 if (er == EMULATE_FAIL)
1526 return er;
1527
1528 /*
1529 * Fetch the instruction.
1530 */
1531 if (cause & CAUSEF_BD)
1532 opc += 1;
1533
1534 inst = kvm_get_inst(opc, vcpu);
1535
1536 if (inst == KVM_INVALID_INST) {
1537 printk("%s: Cannot get inst @ %p\n", __func__, opc);
1538 return EMULATE_FAIL;
1539 }
1540
1541 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1542 int rd = (inst & RD) >> 11;
1543 int rt = (inst & RT) >> 16;
1544 switch (rd) {
1545 case 0: /* CPU number */
1546 arch->gprs[rt] = 0;
1547 break;
1548 case 1: /* SYNCI length */
1549 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1550 current_cpu_data.icache.linesz);
1551 break;
1552 case 2: /* Read count register */
1553 printk("RDHWR: Cont register\n");
1554 arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1555 break;
1556 case 3: /* Count register resolution */
1557 switch (current_cpu_data.cputype) {
1558 case CPU_20KC:
1559 case CPU_25KF:
1560 arch->gprs[rt] = 1;
1561 break;
1562 default:
1563 arch->gprs[rt] = 2;
1564 }
1565 break;
1566 case 29:
1567#if 1
1568 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1569#else
1570 /* UserLocal not implemented */
1571 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1572#endif
1573 break;
1574
1575 default:
1576 printk("RDHWR not supported\n");
1577 er = EMULATE_FAIL;
1578 break;
1579 }
1580 } else {
1581 printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
1582 er = EMULATE_FAIL;
1583 }
1584
1585 /*
1586 * Rollback PC only if emulation was unsuccessful
1587 */
1588 if (er == EMULATE_FAIL) {
1589 vcpu->arch.pc = curr_pc;
1590 }
1591 return er;
1592}
1593
1594enum emulation_result
1595kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1596{
1597 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1598 enum emulation_result er = EMULATE_DONE;
1599 unsigned long curr_pc;
1600
1601 if (run->mmio.len > sizeof(*gpr)) {
1602 printk("Bad MMIO length: %d", run->mmio.len);
1603 er = EMULATE_FAIL;
1604 goto done;
1605 }
1606
1607 /*
1608 * Update PC and hold onto current PC in case there is
1609 * an error and we want to rollback the PC
1610 */
1611 curr_pc = vcpu->arch.pc;
1612 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1613 if (er == EMULATE_FAIL)
1614 return er;
1615
1616 switch (run->mmio.len) {
1617 case 4:
1618 *gpr = *(int32_t *) run->mmio.data;
1619 break;
1620
1621 case 2:
1622 if (vcpu->mmio_needed == 2)
1623 *gpr = *(int16_t *) run->mmio.data;
1624 else
1625 *gpr = *(int16_t *) run->mmio.data;
1626
1627 break;
1628 case 1:
1629 if (vcpu->mmio_needed == 2)
1630 *gpr = *(int8_t *) run->mmio.data;
1631 else
1632 *gpr = *(u8 *) run->mmio.data;
1633 break;
1634 }
1635
1636 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
1637 kvm_debug
1638 ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
1639 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
1640 vcpu->mmio_needed);
1641
1642done:
1643 return er;
1644}
1645
1646static enum emulation_result
1647kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1648 struct kvm_run *run, struct kvm_vcpu *vcpu)
1649{
1650 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1651 struct mips_coproc *cop0 = vcpu->arch.cop0;
1652 struct kvm_vcpu_arch *arch = &vcpu->arch;
1653 enum emulation_result er = EMULATE_DONE;
1654
1655 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1656 /* save old pc */
1657 kvm_write_c0_guest_epc(cop0, arch->pc);
1658 kvm_set_c0_guest_status(cop0, ST0_EXL);
1659
1660 if (cause & CAUSEF_BD)
1661 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1662 else
1663 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1664
1665 kvm_change_c0_guest_cause(cop0, (0xff),
1666 (exccode << CAUSEB_EXCCODE));
1667
1668 /* Set PC to the exception entry point */
1669 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1670 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1671
1672 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1673 exccode, kvm_read_c0_guest_epc(cop0),
1674 kvm_read_c0_guest_badvaddr(cop0));
1675 } else {
1676 printk("Trying to deliver EXC when EXL is already set\n");
1677 er = EMULATE_FAIL;
1678 }
1679
1680 return er;
1681}
1682
/*
 * Verify that a fault taken while the guest was in user mode is one the
 * guest user was actually allowed to cause.  When it is not, the local
 * copy of @cause may be rewritten to the exception the guest should see
 * (e.g. a user-mode TLB miss on guest kernel addresses becomes an address
 * error) before it is delivered via kvm_mips_emulate_exc().
 *
 * Returns EMULATE_DONE when the access is legitimate, EMULATE_PRIV_FAIL
 * when an exception was injected into the guest instead.
 */
enum emulation_result
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		/* These are always permitted from guest user mode. */
		case T_INT:
		case T_SYSCALL:
		case T_BREAK:
		case T_RES_INST:
			break;

		case T_COP_UNUSABLE:
			/* CE == 0 means COP0, which user mode may not touch. */
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case T_TLB_MOD:
			break;

		case T_TLB_LD_MISS:
			/* If we are accessing guest kernel space, send an address error exception to the guest */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: LD MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_TLB_ST_MISS:
			/* If we are accessing guest kernel space, send an address error exception to the guest */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: ST MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_ADDR_ERR_ST:
			printk("%s: address error ST @ %#lx\n", __func__,
			       badvaddr);
			/* A store to the commpage is reflected back as a
			 * TLB store miss instead. */
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case T_ADDR_ERR_LD:
			printk("%s: address error LD @ %#lx\n", __func__,
			       badvaddr);
			/* A load from the commpage is reflected back as a
			 * TLB load miss instead. */
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	/* Deliver the (possibly rewritten) exception to the guest. */
	if (er == EMULATE_PRIV_FAIL) {
		kvm_mips_emulate_exc(cause, opc, run, vcpu);
	}
	return er;
}
1760
1761/* User Address (UA) fault, this could happen if
1762 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
1763 * case we pass on the fault to the guest kernel and let it handle it.
1764 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
1765 * case we inject the TLB from the Guest TLB into the shadow host TLB
1766 */
1767enum emulation_result
1768kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1769 struct kvm_run *run, struct kvm_vcpu *vcpu)
1770{
1771 enum emulation_result er = EMULATE_DONE;
1772 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1773 unsigned long va = vcpu->arch.host_cp0_badvaddr;
1774 int index;
1775
1776 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1777 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1778
1779 /* KVM would not have got the exception if this entry was valid in the shadow host TLB
1780 * Check the Guest TLB, if the entry is not there then send the guest an
1781 * exception. The guest exc handler should then inject an entry into the
1782 * guest TLB
1783 */
1784 index = kvm_mips_guest_tlb_lookup(vcpu,
1785 (va & VPN2_MASK) |
1786 ASID_MASK(kvm_read_c0_guest_entryhi
1787 (vcpu->arch.cop0)));
1788 if (index < 0) {
1789 if (exccode == T_TLB_LD_MISS) {
1790 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1791 } else if (exccode == T_TLB_ST_MISS) {
1792 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1793 } else {
1794 printk("%s: invalid exc code: %d\n", __func__, exccode);
1795 er = EMULATE_FAIL;
1796 }
1797 } else {
1798 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1799
1800 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1801 if (!TLB_IS_VALID(*tlb, va)) {
1802 if (exccode == T_TLB_LD_MISS) {
1803 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1804 vcpu);
1805 } else if (exccode == T_TLB_ST_MISS) {
1806 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1807 vcpu);
1808 } else {
1809 printk("%s: invalid exc code: %d\n", __func__,
1810 exccode);
1811 er = EMULATE_FAIL;
1812 }
1813 } else {
1814#ifdef DEBUG
1815 kvm_debug
1816 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
1817 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1818#endif
1819 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1820 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
1821 NULL);
1822 }
1823 }
1824
1825 return er;
1826}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644
index 000000000000..1e5de16afe29
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -0,0 +1,243 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupt delivery
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20
21#include <linux/kvm_host.h>
22
23#include "kvm_mips_int.h"
24
/* Mark @priority pending in the vcpu's exception bitmap; delivery happens
 * later via kvm_mips_deliver_interrupts(). */
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}
29
/* Remove @priority from the vcpu's pending-exception bitmap. */
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
}
34
/* Raise the guest timer interrupt line (IP7 + TI) and queue it. */
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/* Cause bits to reflect the pending timer interrupt,
	 * the EXC code will be set when we are actually
	 * delivering the interrupt:
	 */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));

	/* Queue up an INT exception for the core */
	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

}
47
/* Clear the guest timer interrupt line (IP7 + TI) and drop it from the
 * pending set. */
void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
53
54void
55kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
56{
57 int intr = (int)irq->irq;
58
59 /* Cause bits to reflect the pending IO interrupt,
60 * the EXC code will be set when we are actually
61 * delivering the interrupt:
62 */
63 switch (intr) {
64 case 2:
65 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
66 /* Queue up an INT exception for the core */
67 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
68 break;
69
70 case 3:
71 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
72 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
73 break;
74
75 case 4:
76 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
77 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
78 break;
79
80 default:
81 break;
82 }
83
84}
85
86void
87kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
88 struct kvm_mips_interrupt *irq)
89{
90 int intr = (int)irq->irq;
91 switch (intr) {
92 case -2:
93 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
94 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
95 break;
96
97 case -3:
98 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
99 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
100 break;
101
102 case -4:
103 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
104 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
105 break;
106
107 default:
108 break;
109 }
110
111}
112
113/* Deliver the interrupt of the corresponding priority, if possible. */
114int
115kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
116 uint32_t cause)
117{
118 int allowed = 0;
119 uint32_t exccode;
120
121 struct kvm_vcpu_arch *arch = &vcpu->arch;
122 struct mips_coproc *cop0 = vcpu->arch.cop0;
123
124 switch (priority) {
125 case MIPS_EXC_INT_TIMER:
126 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
127 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
128 && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
129 allowed = 1;
130 exccode = T_INT;
131 }
132 break;
133
134 case MIPS_EXC_INT_IO:
135 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
136 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
137 && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
138 allowed = 1;
139 exccode = T_INT;
140 }
141 break;
142
143 case MIPS_EXC_INT_IPI_1:
144 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
145 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
146 && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
147 allowed = 1;
148 exccode = T_INT;
149 }
150 break;
151
152 case MIPS_EXC_INT_IPI_2:
153 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
154 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
155 && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
156 allowed = 1;
157 exccode = T_INT;
158 }
159 break;
160
161 default:
162 break;
163 }
164
165 /* Are we allowed to deliver the interrupt ??? */
166 if (allowed) {
167
168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
169 /* save old pc */
170 kvm_write_c0_guest_epc(cop0, arch->pc);
171 kvm_set_c0_guest_status(cop0, ST0_EXL);
172
173 if (cause & CAUSEF_BD)
174 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
175 else
176 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
177
178 kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
179
180 } else
181 kvm_err("Trying to deliver interrupt when EXL is already set\n");
182
183 kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
184 (exccode << CAUSEB_EXCCODE));
185
186 /* XXXSL Set PC to the interrupt exception entry point */
187 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
188 arch->pc = KVM_GUEST_KSEG0 + 0x200;
189 else
190 arch->pc = KVM_GUEST_KSEG0 + 0x180;
191
192 clear_bit(priority, &vcpu->arch.pending_exceptions);
193 }
194
195 return allowed;
196}
197
/*
 * Callback invoked after an interrupt has been cleared; trap-and-emulate
 * needs no additional bookkeeping, so always report success.
 */
int
kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
		      uint32_t cause)
{
	return 1;
}
204
205void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
206{
207 unsigned long *pending = &vcpu->arch.pending_exceptions;
208 unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
209 unsigned int priority;
210
211 if (!(*pending) && !(*pending_clr))
212 return;
213
214 priority = __ffs(*pending_clr);
215 while (priority <= MIPS_EXC_MAX) {
216 if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
217 if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
218 break;
219 }
220
221 priority = find_next_bit(pending_clr,
222 BITS_PER_BYTE * sizeof(*pending_clr),
223 priority + 1);
224 }
225
226 priority = __ffs(*pending);
227 while (priority <= MIPS_EXC_MAX) {
228 if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
229 if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
230 break;
231 }
232
233 priority = find_next_bit(pending,
234 BITS_PER_BYTE * sizeof(*pending),
235 priority + 1);
236 }
237
238}
239
/* Non-zero when a guest timer interrupt is queued but not yet delivered. */
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
{
	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644
index 000000000000..20da7d29eede
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -0,0 +1,49 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupts
7* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8* Authors: Sanjay Lal <sanjayl@kymasys.com>
9*/
10
11/* MIPS Exception Priorities, exceptions (including interrupts) are queued up
12 * for the guest in the order specified by their priorities
13 */
14
/* Each value is a bit index into vcpu->arch.pending_exceptions; lower
 * numbers are delivered first. */
#define MIPS_EXC_RESET 0
#define MIPS_EXC_SRESET 1
#define MIPS_EXC_DEBUG_ST 2
#define MIPS_EXC_DEBUG 3
#define MIPS_EXC_DDB 4
#define MIPS_EXC_NMI 5
#define MIPS_EXC_MCHK 6
#define MIPS_EXC_INT_TIMER 7
#define MIPS_EXC_INT_IO 8
#define MIPS_EXC_EXECUTE 9
#define MIPS_EXC_INT_IPI_1 10
#define MIPS_EXC_INT_IPI_2 11
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */

/* Cause.TI: timer interrupt pending (release-2 cores) */
#define C_TI (_ULCAST_(1) << 30)

/* When 0, stop scanning the pending bitmaps after the first event handled. */
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
34
35void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
36void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
37int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
38
39void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
40void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
41void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
42 struct kvm_mips_interrupt *irq);
43void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
44 struct kvm_mips_interrupt *irq);
45int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
46 uint32_t cause);
47int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
48 uint32_t cause);
49void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644
index 000000000000..86d3b4cc348b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -0,0 +1,24 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
/*
 * Define opcode values not defined in <asm/inst.h>
 */
13
14#ifndef __KVM_MIPS_OPCODE_H__
15#define __KVM_MIPS_OPCODE_H__
16
17/* COP0 Ops */
18#define mfmcz_op 0x0b /* 01011 */
19#define wrpgpr_op 0x0e /* 01110 */
20
21/* COP0 opcodes (only if COP0 and CO=1): */
22#define wait_op 0x20 /* 100000 */
23
24#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644
index 000000000000..075904bcac1b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -0,0 +1,82 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: COP0 access histogram
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/kvm_host.h>
13
/* Human-readable labels for the exit-reason counters; order must match
 * the MAX_KVM_MIPS_EXIT_TYPES stat indices. */
char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
	"WAIT",
	"CACHE",
	"Signal",
	"Interrupt",
	"COP0/1 Unusable",
	"TLB Mod",
	"TLB Miss (LD)",
	"TLB Miss (ST)",
	"Address Err (ST)",
	"Address Error (LD)",
	"System Call",
	"Reserved Inst",
	"Break Inst",
	"D-Cache Flushes",
};
30
/* Names of the COP0 registers, indexed by register number (0..31), used
 * when dumping the access-profile counters. */
char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
	"Index",
	"Random",
	"EntryLo0",
	"EntryLo1",
	"Context",
	"PG Mask",
	"Wired",
	"HWREna",
	"BadVAddr",
	"Count",
	"EntryHI",
	"Compare",
	"Status",
	"Cause",
	"EXC PC",
	"PRID",
	"Config",
	"LLAddr",
	"Watch Lo",
	"Watch Hi",
	"X Context",
	"Reserved",
	"Impl Dep",
	"Debug",
	"DEPC",
	"PerfCnt",
	"ErrCtl",
	"CacheErr",
	"TagLo",
	"TagHi",
	"ErrorEPC",
	"DESAVE"
};
65
/*
 * Print the per-vcpu COP0 access-profile counters (one line per register
 * and select with a non-zero count).  Compiled out unless
 * CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS is enabled.  Always returns 0.
 */
int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	int reg, sel;

	printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
	for (reg = 0; reg < N_MIPS_COPROC_REGS; reg++) {
		for (sel = 0; sel < N_MIPS_COPROC_SEL; sel++) {
			if (!vcpu->arch.cop0->stat[reg][sel])
				continue;
			printk("%s[%d]: %lu\n", kvm_cop0_str[reg], sel,
			       vcpu->arch.cop0->stat[reg][sel]);
		}
	}
#endif

	return 0;
}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 000000000000..89511a9258d3
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,928 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
7* TLB handlers run from KSEG0
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/smp.h>
16#include <linux/mm.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/kvm_host.h>
20
21#include <asm/cpu.h>
22#include <asm/bootinfo.h>
23#include <asm/mmu_context.h>
24#include <asm/pgtable.h>
25#include <asm/cacheflush.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
#define KVM_GUEST_PC_TLB 0	/* wired shadow-TLB slot for the guest PC page */
#define KVM_GUEST_SP_TLB 1	/* wired shadow-TLB slot for the guest SP page */

#define PRIx64 "llx"	/* local printf format for uint64_t values */

/* Unique, harmless EntryHi VPN per TLB index, used to invalidate entries.
 * NOTE(review): original comment mentioned VZ EntryHi.EHINV, but the macro
 * does not use EHINV -- confirm intent. */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/* Count of live KVM instances; non-zero means guest mappings may be in
 * the host TLB. */
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
51
/* Host ASID used for this vcpu's guest-kernel-mode mappings on the
 * current CPU. */
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
}
56
57
/* Host ASID used for this vcpu's guest-user-mode mappings on the
 * current CPU. */
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
}
62
/* Index of the wired host TLB entry reserved for the commpage.
 * NOTE(review): despite the "_asid" name this returns a TLB index, not an
 * ASID -- see its use as write_c0_index() argument below. */
inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
67
68
/*
 * Debug helpers for dumping TLB contents.
 */
72
/*
 * Dump every entry of the host hardware TLB via printk.  Runs with
 * interrupts disabled because tlb_read() clobbers EntryHi/EntryLo/
 * PageMask/Index; EntryHi and PageMask are restored at the end.
 */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	/* Save the CP0 state tlb_read() will clobber. */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		/* '*' marks entries where neither half is valid. */
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
120
121void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
122{
123 struct mips_coproc *cop0 = vcpu->arch.cop0;
124 struct kvm_mips_tlb tlb;
125 int i;
126
127 printk("Guest TLBs:\n");
128 printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
129
130 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
131 tlb = vcpu->arch.guest_tlb[i];
132 printk("TLB%c%3d Hi 0x%08lx ",
133 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
134 i, tlb.tlb_hi);
135 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
136 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
137 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
138 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
139 (tlb.tlb_lo0 >> 3) & 7);
140 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
141 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
142 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
143 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
144 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
145 }
146}
147
/*
 * Dump this vcpu's per-CPU shadow TLB via printk.
 * NOTE(review): reads the shadow_tlb slot of the *current* CPU through a
 * volatile copy -- presumably the caller is pinned/preemption-safe;
 * confirm at call sites.
 */
void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
{
	int i;
	volatile struct kvm_mips_tlb tlb;

	printk("Shadow TLBs:\n");
	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
171
172static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
173{
174 pfn_t pfn;
175
176 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
177 return;
178
179 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
180
181 if (kvm_mips_is_error_pfn(pfn)) {
182 panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
183 }
184
185 kvm->arch.guest_pmap[gfn] = pfn;
186 return;
187}
188
/* Translate guest KSEG0 addresses to Host PA */
/*
 * Returns the host physical address backing guest virtual address @gva,
 * or KVM_INVALID_PAGE when @gva is outside guest KSEG0 or beyond the
 * guest physical map.  Faults the page in via kvm_mips_map_page() first.
 */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
	unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;	/* byte offset within the page */
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}
	kvm_mips_map_page(vcpu->kvm, gfn);
	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
213
214/* XXXKYMA: Must be called with interrupts disabled */
215/* set flush_dcache_mask == 0 if no dcache flush required */
216int
217kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
218 unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
219{
220 unsigned long flags;
221 unsigned long old_entryhi;
222 volatile int idx;
223
224 local_irq_save(flags);
225
226
227 old_entryhi = read_c0_entryhi();
228 write_c0_entryhi(entryhi);
229 mtc0_tlbw_hazard();
230
231 tlb_probe();
232 tlb_probe_hazard();
233 idx = read_c0_index();
234
235 if (idx > current_cpu_data.tlbsize) {
236 kvm_err("%s: Invalid Index: %d\n", __func__, idx);
237 kvm_mips_dump_host_tlbs();
238 return -1;
239 }
240
241 if (idx < 0) {
242 idx = read_c0_random() % current_cpu_data.tlbsize;
243 write_c0_index(idx);
244 mtc0_tlbw_hazard();
245 }
246 write_c0_entrylo0(entrylo0);
247 write_c0_entrylo1(entrylo1);
248 mtc0_tlbw_hazard();
249
250 tlb_write_indexed();
251 tlbw_use_hazard();
252
253#ifdef DEBUG
254 if (debug) {
255 kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
256 "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
257 vcpu->arch.pc, idx, read_c0_entryhi(),
258 read_c0_entrylo0(), read_c0_entrylo1());
259 }
260#endif
261
262 /* Flush D-cache */
263 if (flush_dcache_mask) {
264 if (entrylo0 & MIPS3_PG_V) {
265 ++vcpu->stat.flush_dcache_exits;
266 flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
267 }
268 if (entrylo1 & MIPS3_PG_V) {
269 ++vcpu->stat.flush_dcache_exits;
270 flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
271 (0x1 << PAGE_SHIFT));
272 }
273 }
274
275 /* Restore old ASID */
276 write_c0_entryhi(old_entryhi);
277 mtc0_tlbw_hazard();
278 tlbw_use_hazard();
279 local_irq_restore(flags);
280 return 0;
281}
282
283
/* XXXKYMA: Must be called with interrupts disabled */
/*
 * Handle a host TLB miss on a guest KSEG0 address: map the even/odd page
 * pair containing @badvaddr and write a matching wired-format entry into
 * the host TLB.  Returns 0 on success, -1 on an invalid address or gfn.
 */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;


	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	/* A TLB entry maps an even/odd page pair on a double-page boundary. */
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	kvm_mips_map_page(vcpu->kvm, gfn);
	kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	/* EntryLo attrs: cacheable (0x3 << 3), dirty (1 << 2), valid (1 << 1) */
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
		(0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
		(0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
333
/*
 * Map the guest commpage at @badvaddr into the wired host TLB slot
 * reserved for it (kvm_mips_get_commpage_asid() returns that index).
 * Only the even EntryLo half is populated; the odd half is left invalid.
 * Always returns 0.
 */
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;


	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	/* cacheable, dirty, valid */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
		(0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	/* Write to the reserved wired slot, not a probed one. */
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

#ifdef DEBUG
	kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		   vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		   read_c0_entrylo0(), read_c0_entrylo1());
#endif

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
378
/*
 * Inject guest TLB entry @tlb into the host TLB, translating its guest
 * physical frames to host pfns.  Optionally reports the resolved host
 * physical addresses through @hpa0/@hpa1 (either may be NULL).
 * Returns the result of kvm_mips_host_tlb_write().
 */
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
	struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;


	/* VPN2 of zero is treated as an unbacked mapping. */
	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB: ASID by current guest mode,
	 * cacheability forced, D/V bits carried over from the guest entry. */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
			kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
			(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
			(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

#ifdef DEBUG
	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);
#endif

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
421
422int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
423{
424 int i;
425 int index = -1;
426 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
427
428
429 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
430 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
431 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
432 index = i;
433 break;
434 }
435 }
436
437#ifdef DEBUG
438 kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
439 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
440#endif
441
442 return index;
443}
444
/*
 * Probe the host TLB for the entry mapping @vaddr under the vcpu's
 * current guest ASID (kernel or user, depending on guest mode).
 *
 * Returns the host TLB index of the match, or a negative value on a
 * probe miss.  Runs with interrupts disabled and restores the previous
 * EntryHi/ASID before returning.
 */
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	/* volatile: presumably to keep the CP0 Index read from being
	 * reordered/elided — TODO confirm it is actually required */
	volatile int idx;


	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
	else {
		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	}

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
#endif

	return idx;
}
480
481int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
482{
483 int idx;
484 unsigned long flags, old_entryhi;
485
486 local_irq_save(flags);
487
488
489 old_entryhi = read_c0_entryhi();
490
491 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
492 mtc0_tlbw_hazard();
493
494 tlb_probe();
495 tlb_probe_hazard();
496 idx = read_c0_index();
497
498 if (idx >= current_cpu_data.tlbsize)
499 BUG();
500
501 if (idx > 0) {
502 write_c0_entryhi(UNIQUE_ENTRYHI(idx));
503 mtc0_tlbw_hazard();
504
505 write_c0_entrylo0(0);
506 mtc0_tlbw_hazard();
507
508 write_c0_entrylo1(0);
509 mtc0_tlbw_hazard();
510
511 tlb_write_indexed();
512 mtc0_tlbw_hazard();
513 }
514
515 write_c0_entryhi(old_entryhi);
516 mtc0_tlbw_hazard();
517 tlbw_use_hazard();
518
519 local_irq_restore(flags);
520
521#ifdef DEBUG
522 if (idx > 0) {
523 kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
524 (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
525 }
526#endif
527
528 return 0;
529}
530
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/
/*
 * Invalidate the host TLB entry at @index by rewriting it with a unique
 * dummy EntryHi and zeroed EntryLo pair.  Interrupts are disabled for
 * the duration and the previous EntryHi is restored.  Returns 0.
 */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	/* Caller must supply a valid host TLB index. */
	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);


	old_entryhi = read_c0_entryhi();

	/* Make sure the replacement entry differs from every other one. */
	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}
568
/*
 * Invalidate every entry in the host TLB.
 *
 * @skip_kseg0: when non-zero, each entry is read back first and entries
 * whose EntryHi falls in guest KSEG0 are preserved (guest kernel
 * mappings survive the flush).
 *
 * Entries are replaced with unique dummy EntryHi values so all slots
 * stay distinct.  Runs with interrupts disabled; restores the previous
 * EntryHi and PageMask.
 */
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;


	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {

		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			/* Read the entry back to inspect its EntryHi. */
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
				continue;
			}
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
622
/*
 * Allocate the next ASID from the per-cpu ASID cache for @mm (a guest
 * kernel or guest user mm).  When the ASID space wraps, flush the
 * icache (on VTAG icaches) and the TLB and begin a new ASID cycle —
 * mirrors the bare-metal get_new_mmu_context().
 *
 * NOTE(review): ASID_INC(asid) appears only inside the wrap test; unless
 * that macro increments `asid` in place, the value stored below is the
 * unincremented ASID — confirm against this tree's mmu_context.h.
 */
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!(ASID_MASK(ASID_INC(asid)))) {
		if (cpu_has_vtag_icache) {
			flush_icache_all();
		}

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
642
/*
 * Snapshot the entire host TLB into this vcpu's per-cpu shadow copy
 * (vcpu->arch.shadow_tlb[cpu][]), one entry at a time via indexed TLB
 * reads.  Interrupts are disabled; EntryHi and PageMask are restored.
 */
void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlbw_use_hazard();

		/* Capture the full entry: hi, both lo halves and page mask. */
		vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
		vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

}
675
/*
 * Reload the host TLB from this vcpu's per-cpu shadow copy — the
 * inverse of kvm_shadow_tlb_put().  Interrupts are disabled; the
 * previous EntryHi is restored afterwards.
 */
void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	old_ctx = read_c0_entryhi();

	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
		mtc0_tlbw_hazard();
		write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
		write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);

		write_c0_index(entry);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
705
706
/*
 * Flush the local CPU's entire TLB: every slot is rewritten with a
 * unique impossible VPN2 (UNIQUE_ENTRYHI) and zeroed EntryLo pair.
 * Used when starting a new ASID cycle.  Interrupts are disabled and the
 * previous EntryHi is restored.
 */
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
734
/*
 * Initialize the vcpu's shadow TLB for every possible CPU: each slot
 * gets a unique invalid EntryHi, zeroed EntryLo halves, and the current
 * PageMask.  Called once at vcpu creation time.
 */
void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
{
	int cpu, entry;

	for_each_possible_cpu(cpu) {
		for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
			vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
			    UNIQUE_ENTRYHI(entry);
			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
			vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
			    read_c0_pagemask();
#ifdef DEBUG
			kvm_debug
			    ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
			     cpu, entry,
			     vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
#endif
		}
	}
}
758
/*
 * Scheduled back onto a CPU after preemption: restore/refresh the guest
 * ASIDs and re-establish the EntryHi that was live when we were
 * descheduled.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

#ifdef DEBUG
	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
#endif

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	/* ASID version rolled over since this vcpu last ran here:
	 * regenerate both the guest kernel and guest user ASIDs. */
	if (((vcpu->arch.
	      guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
			 cpu_context(cpu, current->mm));
		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			 vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
	}

	/* Only reload shadow host TLB if new ASIDs haven't been allocated */
#if 0
	if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
		kvm_mips_flush_host_tlb(0);
		kvm_shadow_tlb_load(vcpu);
	}
#endif

	if (!newasid) {
		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/* Were we in guest context? If so then the pre-empted ASID is no longer
		 * valid, we need to set it to what it should be based on the mode of
		 * the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(ASID_MASK(vcpu->arch.
							   guest_kernel_asid[cpu]));
			else
				write_c0_entryhi(ASID_MASK(vcpu->arch.
							   guest_user_asid[cpu]));
			ehb();
		}
	}

	local_irq_restore(flags);

}
831
/* ASID can change if another task is scheduled during preemption */
/*
 * Being descheduled: remember which CPU and which EntryHi/ASID were
 * live so kvm_arch_vcpu_load() can restore them, then switch EntryHi
 * back to the host task's ASID.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();


	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

#if 0
	if ((atomic_read(&kvm_mips_instance) > 1)) {
		kvm_shadow_tlb_put(vcpu);
	}
#endif

	/* The host mm's ASID generation expired while the guest ran:
	 * drop the context so it gets a fresh ASID. */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
863
/*
 * Fetch the guest instruction at @opc.
 *
 * Guest user / KSEG23 addresses are read through the TLB: if the host
 * TLB misses, the guest TLB is consulted and, on a hit, the mapping is
 * injected into the host TLB before the load.  Guest KSEG0 addresses
 * are translated to a host physical address and read via CKSEG0.
 *
 * Returns the instruction word, or KVM_INVALID_INST on failure.
 */
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			/* Host TLB miss: look up the guest TLB under the
			 * guest's current EntryHi ASID. */
			index =
			    kvm_mips_guest_tlb_lookup(vcpu,
						      ((unsigned long) opc & VPN2_MASK)
						      |
						      ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
			if (index < 0) {
				kvm_err
				    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
				     __func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			/* Inject the guest mapping into the host TLB, then
			 * retry the load. */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	/* NOTE(review): this branch passes the bare pointer to
	 * KVM_GUEST_KSEGX while the branch above casts to unsigned long —
	 * confirm the macro performs the cast itself. */
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr =
		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							  (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
910
/* Exported for use by the loadable KVM/MIPS module. */
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_shadow_tlb_load);
EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644
index 000000000000..466aeef044bd
--- /dev/null
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -0,0 +1,482 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16
17#include <linux/kvm_host.h>
18
19#include "kvm_mips_opcode.h"
20#include "kvm_mips_int.h"
21
22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23{
24 gpa_t gpa;
25 uint32_t kseg = KSEGX(gva);
26
27 if ((kseg == CKSEG0) || (kseg == CKSEG1))
28 gpa = CPHYSADDR(gva);
29 else {
30 printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
31 kvm_mips_dump_host_tlbs();
32 gpa = KVM_INVALID_ADDR;
33 }
34
35#ifdef DEBUG
36 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
37#endif
38
39 return gpa;
40}
41
42
43static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
44{
45 struct kvm_run *run = vcpu->run;
46 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
47 unsigned long cause = vcpu->arch.host_cp0_cause;
48 enum emulation_result er = EMULATE_DONE;
49 int ret = RESUME_GUEST;
50
51 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
52 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
53 } else
54 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
55
56 switch (er) {
57 case EMULATE_DONE:
58 ret = RESUME_GUEST;
59 break;
60
61 case EMULATE_FAIL:
62 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
63 ret = RESUME_HOST;
64 break;
65
66 case EMULATE_WAIT:
67 run->exit_reason = KVM_EXIT_INTR;
68 ret = RESUME_HOST;
69 break;
70
71 default:
72 BUG();
73 }
74 return ret;
75}
76
77static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
78{
79 struct kvm_run *run = vcpu->run;
80 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
81 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
82 unsigned long cause = vcpu->arch.host_cp0_cause;
83 enum emulation_result er = EMULATE_DONE;
84 int ret = RESUME_GUEST;
85
86 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
87 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
88#ifdef DEBUG
89 kvm_debug
90 ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
91 cause, opc, badvaddr);
92#endif
93 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
94
95 if (er == EMULATE_DONE)
96 ret = RESUME_GUEST;
97 else {
98 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
99 ret = RESUME_HOST;
100 }
101 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
102 /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
103 * using HIGHMEM. Need to address this in a HIGHMEM kernel
104 */
105 printk
106 ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
107 cause, opc, badvaddr);
108 kvm_mips_dump_host_tlbs();
109 kvm_arch_vcpu_dump_regs(vcpu);
110 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
111 ret = RESUME_HOST;
112 } else {
113 printk
114 ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
115 cause, opc, badvaddr);
116 kvm_mips_dump_host_tlbs();
117 kvm_arch_vcpu_dump_regs(vcpu);
118 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
119 ret = RESUME_HOST;
120 }
121 return ret;
122}
123
124static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
125{
126 struct kvm_run *run = vcpu->run;
127 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
128 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
129 unsigned long cause = vcpu->arch.host_cp0_cause;
130 enum emulation_result er = EMULATE_DONE;
131 int ret = RESUME_GUEST;
132
133 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
134 && KVM_GUEST_KERNEL_MODE(vcpu)) {
135 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
136 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
137 ret = RESUME_HOST;
138 }
139 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
140 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
141#ifdef DEBUG
142 kvm_debug
143 ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
144 cause, opc, badvaddr);
145#endif
146 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
147 if (er == EMULATE_DONE)
148 ret = RESUME_GUEST;
149 else {
150 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
151 ret = RESUME_HOST;
152 }
153 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
154 /* All KSEG0 faults are handled by KVM, as the guest kernel does not
155 * expect to ever get them
156 */
157 if (kvm_mips_handle_kseg0_tlb_fault
158 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
159 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
160 ret = RESUME_HOST;
161 }
162 } else {
163 kvm_err
164 ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
165 cause, opc, badvaddr);
166 kvm_mips_dump_host_tlbs();
167 kvm_arch_vcpu_dump_regs(vcpu);
168 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
169 ret = RESUME_HOST;
170 }
171 return ret;
172}
173
174static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
175{
176 struct kvm_run *run = vcpu->run;
177 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
178 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
179 unsigned long cause = vcpu->arch.host_cp0_cause;
180 enum emulation_result er = EMULATE_DONE;
181 int ret = RESUME_GUEST;
182
183 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
184 && KVM_GUEST_KERNEL_MODE(vcpu)) {
185 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
186 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
187 ret = RESUME_HOST;
188 }
189 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
190 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
191#ifdef DEBUG
192 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
193 vcpu->arch.pc, badvaddr);
194#endif
195
196 /* User Address (UA) fault, this could happen if
197 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
198 * case we pass on the fault to the guest kernel and let it handle it.
199 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
200 * case we inject the TLB from the Guest TLB into the shadow host TLB
201 */
202
203 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
204 if (er == EMULATE_DONE)
205 ret = RESUME_GUEST;
206 else {
207 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
208 ret = RESUME_HOST;
209 }
210 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
211 if (kvm_mips_handle_kseg0_tlb_fault
212 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
213 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
214 ret = RESUME_HOST;
215 }
216 } else {
217 printk
218 ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
219 cause, opc, badvaddr);
220 kvm_mips_dump_host_tlbs();
221 kvm_arch_vcpu_dump_regs(vcpu);
222 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
223 ret = RESUME_HOST;
224 }
225 return ret;
226}
227
228static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
229{
230 struct kvm_run *run = vcpu->run;
231 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
232 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
233 unsigned long cause = vcpu->arch.host_cp0_cause;
234 enum emulation_result er = EMULATE_DONE;
235 int ret = RESUME_GUEST;
236
237 if (KVM_GUEST_KERNEL_MODE(vcpu)
238 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
239#ifdef DEBUG
240 kvm_debug("Emulate Store to MMIO space\n");
241#endif
242 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
243 if (er == EMULATE_FAIL) {
244 printk("Emulate Store to MMIO space failed\n");
245 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
246 ret = RESUME_HOST;
247 } else {
248 run->exit_reason = KVM_EXIT_MMIO;
249 ret = RESUME_HOST;
250 }
251 } else {
252 printk
253 ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
254 cause, opc, badvaddr);
255 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
256 ret = RESUME_HOST;
257 }
258 return ret;
259}
260
261static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
262{
263 struct kvm_run *run = vcpu->run;
264 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
265 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
266 unsigned long cause = vcpu->arch.host_cp0_cause;
267 enum emulation_result er = EMULATE_DONE;
268 int ret = RESUME_GUEST;
269
270 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
271#ifdef DEBUG
272 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
273#endif
274 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
275 if (er == EMULATE_FAIL) {
276 printk("Emulate Load from MMIO space failed\n");
277 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
278 ret = RESUME_HOST;
279 } else {
280 run->exit_reason = KVM_EXIT_MMIO;
281 ret = RESUME_HOST;
282 }
283 } else {
284 printk
285 ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
286 cause, opc, badvaddr);
287 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
288 ret = RESUME_HOST;
289 er = EMULATE_FAIL;
290 }
291 return ret;
292}
293
294static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
295{
296 struct kvm_run *run = vcpu->run;
297 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
298 unsigned long cause = vcpu->arch.host_cp0_cause;
299 enum emulation_result er = EMULATE_DONE;
300 int ret = RESUME_GUEST;
301
302 er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
303 if (er == EMULATE_DONE)
304 ret = RESUME_GUEST;
305 else {
306 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
307 ret = RESUME_HOST;
308 }
309 return ret;
310}
311
312static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
313{
314 struct kvm_run *run = vcpu->run;
315 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
316 unsigned long cause = vcpu->arch.host_cp0_cause;
317 enum emulation_result er = EMULATE_DONE;
318 int ret = RESUME_GUEST;
319
320 er = kvm_mips_handle_ri(cause, opc, run, vcpu);
321 if (er == EMULATE_DONE)
322 ret = RESUME_GUEST;
323 else {
324 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
325 ret = RESUME_HOST;
326 }
327 return ret;
328}
329
330static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
331{
332 struct kvm_run *run = vcpu->run;
333 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
334 unsigned long cause = vcpu->arch.host_cp0_cause;
335 enum emulation_result er = EMULATE_DONE;
336 int ret = RESUME_GUEST;
337
338 er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
339 if (er == EMULATE_DONE)
340 ret = RESUME_GUEST;
341 else {
342 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
343 ret = RESUME_HOST;
344 }
345 return ret;
346}
347
/*
 * KVM_SET_REGS backend: copy the CP0 register image supplied by
 * userspace into the guest's virtual coprocessor 0.  Note the Config
 * registers are not settable here (compare the _get_ counterpart).
 * Returns 0.
 */
static int
kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
	kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
	kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
	kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
	kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);

	kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
	kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
	kvm_write_c0_guest_pagemask(cop0,
				    regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
	kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
	kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);

	return 0;
}
368
/*
 * KVM_GET_REGS backend: copy the guest's virtual CP0 state out to
 * userspace, including the (read-only here) Config0-3/7 registers.
 * Returns 0.
 */
static int
kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
	regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
	regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
	regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
	regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);

	regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
	regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
	regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
	    kvm_read_c0_guest_pagemask(cop0);
	regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
	regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);

	regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
	regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
	regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
	regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
	regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);

	return 0;
}
395
/* No VM-wide state to set up for trap-and-emulate. */
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}
400
/* No per-vcpu state to set up at creation time for trap-and-emulate. */
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
405
/*
 * Initialize the guest's virtual CP0 registers so it boots believing it
 * runs on a MIPS 24Kc-like CPU.  Returns 0.
 */
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected, for now we simulate a
	 * MIPS 24kc
	 */
	/* PRId value presented to the guest (24Kc-class). */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	kvm_write_c0_guest_config(cop0,
				  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &=
	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
	/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
	kvm_write_c0_guest_config3(cop0,
				   MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
								       CP0C3_ULRI));

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

	return 0;
}
451
/*
 * Trap-and-emulate implementation of the KVM/MIPS callback interface,
 * handed to generic code by kvm_mips_emulation_init().
 */
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
	.vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
};
477
/*
 * Publish the trap-and-emulate callback table to the generic KVM/MIPS
 * core.  Returns 0.
 */
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 000000000000..bc9e0f406c08
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,46 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_KVM_H
12
13#include <linux/tracepoint.h>
14
15#undef TRACE_SYSTEM
16#define TRACE_SYSTEM kvm
17#define TRACE_INCLUDE_PATH .
18#define TRACE_INCLUDE_FILE trace
19
/*
 * Tracepoints for VM exits
 */
23extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
24
25TRACE_EVENT(kvm_exit,
26 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
27 TP_ARGS(vcpu, reason),
28 TP_STRUCT__entry(
29 __field(struct kvm_vcpu *, vcpu)
30 __field(unsigned int, reason)
31 ),
32
33 TP_fast_assign(
34 __entry->vcpu = vcpu;
35 __entry->reason = reason;
36 ),
37
38 TP_printk("[%s]PC: 0x%08lx",
39 kvm_mips_exit_types_str[__entry->reason],
40 __entry->vcpu->arch.pc)
41);
42
43#endif /* _TRACE_KVM_H */
44
45/* This part must be outside protection */
46#include <trace/define_trace.h>
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index a64daee740ee..3b2a1e78a543 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -19,7 +19,7 @@
19 */ 19 */
20void __mips_set_bit(unsigned long nr, volatile unsigned long *addr) 20void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
21{ 21{
22 volatile unsigned long *a = addr; 22 unsigned long *a = (unsigned long *)addr;
23 unsigned bit = nr & SZLONG_MASK; 23 unsigned bit = nr & SZLONG_MASK;
24 unsigned long mask; 24 unsigned long mask;
25 unsigned long flags; 25 unsigned long flags;
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit);
41 */ 41 */
42void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr) 42void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
43{ 43{
44 volatile unsigned long *a = addr; 44 unsigned long *a = (unsigned long *)addr;
45 unsigned bit = nr & SZLONG_MASK; 45 unsigned bit = nr & SZLONG_MASK;
46 unsigned long mask; 46 unsigned long mask;
47 unsigned long flags; 47 unsigned long flags;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
63 */ 63 */
64void __mips_change_bit(unsigned long nr, volatile unsigned long *addr) 64void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
65{ 65{
66 volatile unsigned long *a = addr; 66 unsigned long *a = (unsigned long *)addr;
67 unsigned bit = nr & SZLONG_MASK; 67 unsigned bit = nr & SZLONG_MASK;
68 unsigned long mask; 68 unsigned long mask;
69 unsigned long flags; 69 unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit);
86int __mips_test_and_set_bit(unsigned long nr, 86int __mips_test_and_set_bit(unsigned long nr,
87 volatile unsigned long *addr) 87 volatile unsigned long *addr)
88{ 88{
89 volatile unsigned long *a = addr; 89 unsigned long *a = (unsigned long *)addr;
90 unsigned bit = nr & SZLONG_MASK; 90 unsigned bit = nr & SZLONG_MASK;
91 unsigned long mask; 91 unsigned long mask;
92 unsigned long flags; 92 unsigned long flags;
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
112int __mips_test_and_set_bit_lock(unsigned long nr, 112int __mips_test_and_set_bit_lock(unsigned long nr,
113 volatile unsigned long *addr) 113 volatile unsigned long *addr)
114{ 114{
115 volatile unsigned long *a = addr; 115 unsigned long *a = (unsigned long *)addr;
116 unsigned bit = nr & SZLONG_MASK; 116 unsigned bit = nr & SZLONG_MASK;
117 unsigned long mask; 117 unsigned long mask;
118 unsigned long flags; 118 unsigned long flags;
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
137 */ 137 */
138int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) 138int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
139{ 139{
140 volatile unsigned long *a = addr; 140 unsigned long *a = (unsigned long *)addr;
141 unsigned bit = nr & SZLONG_MASK; 141 unsigned bit = nr & SZLONG_MASK;
142 unsigned long mask; 142 unsigned long mask;
143 unsigned long flags; 143 unsigned long flags;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
162 */ 162 */
163int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) 163int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
164{ 164{
165 volatile unsigned long *a = addr; 165 unsigned long *a = (unsigned long *)addr;
166 unsigned bit = nr & SZLONG_MASK; 166 unsigned bit = nr & SZLONG_MASK;
167 unsigned long mask; 167 unsigned long mask;
168 unsigned long flags; 168 unsigned long flags;
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 32b9f21bfd85..8a12d00908e0 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,6 +11,7 @@
11#include <asm/page.h> 11#include <asm/page.h>
12#include <asm/pgtable.h> 12#include <asm/pgtable.h>
13#include <asm/tlbdebug.h> 13#include <asm/tlbdebug.h>
14#include <asm/mmu_context.h>
14 15
15static inline const char *msk2str(unsigned int mask) 16static inline const char *msk2str(unsigned int mask)
16{ 17{
@@ -55,7 +56,7 @@ static void dump_tlb(int first, int last)
55 s_pagemask = read_c0_pagemask(); 56 s_pagemask = read_c0_pagemask();
56 s_entryhi = read_c0_entryhi(); 57 s_entryhi = read_c0_entryhi();
57 s_index = read_c0_index(); 58 s_index = read_c0_index();
58 asid = s_entryhi & 0xff; 59 asid = ASID_MASK(s_entryhi);
59 60
60 for (i = first; i <= last; i++) { 61 for (i = first; i <= last; i++) {
61 write_c0_index(i); 62 write_c0_index(i);
@@ -85,7 +86,7 @@ static void dump_tlb(int first, int last)
85 86
86 printk("va=%0*lx asid=%02lx\n", 87 printk("va=%0*lx asid=%02lx\n",
87 width, (entryhi & ~0x1fffUL), 88 width, (entryhi & ~0x1fffUL),
88 entryhi & 0xff); 89 ASID_MASK(entryhi));
89 printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", 90 printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
90 width, 91 width,
91 (entrylo0 << 6) & PAGE_MASK, c0, 92 (entrylo0 << 6) & PAGE_MASK, c0,
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 053d3b0b0317..0580194e7402 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -5,7 +5,8 @@
5 * 5 *
6 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle 6 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2007 Maciej W. Rozycki 8 * Copyright (C) 2007 by Maciej W. Rozycki
9 * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
9 */ 10 */
10#include <asm/asm.h> 11#include <asm/asm.h>
11#include <asm/asm-offsets.h> 12#include <asm/asm-offsets.h>
@@ -19,6 +20,20 @@
19#define LONG_S_R sdr 20#define LONG_S_R sdr
20#endif 21#endif
21 22
23#ifdef CONFIG_CPU_MICROMIPS
24#define STORSIZE (LONGSIZE * 2)
25#define STORMASK (STORSIZE - 1)
26#define FILL64RG t8
27#define FILLPTRG t7
28#undef LONG_S
29#define LONG_S LONG_SP
30#else
31#define STORSIZE LONGSIZE
32#define STORMASK LONGMASK
33#define FILL64RG a1
34#define FILLPTRG t0
35#endif
36
22#define EX(insn,reg,addr,handler) \ 37#define EX(insn,reg,addr,handler) \
239: insn reg, addr; \ 389: insn reg, addr; \
24 .section __ex_table,"a"; \ 39 .section __ex_table,"a"; \
@@ -26,23 +41,25 @@
26 .previous 41 .previous
27 42
28 .macro f_fill64 dst, offset, val, fixup 43 .macro f_fill64 dst, offset, val, fixup
29 EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup) 44 EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
30 EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup) 45 EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
31 EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup) 46 EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup)
32 EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup) 47 EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup)
33 EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup) 48#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
34 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup) 49 EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup)
35 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup) 50 EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup)
36 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup) 51 EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup)
37#if LONGSIZE == 4 52 EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup)
38 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup) 53#endif
39 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup) 54#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
40 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup) 55 EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup)
41 EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup) 56 EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup)
42 EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup) 57 EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
43 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup) 58 EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
44 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup) 59 EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
45 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup) 60 EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
61 EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
62 EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
46#endif 63#endif
47 .endm 64 .endm
48 65
@@ -71,16 +88,20 @@ LEAF(memset)
711: 881:
72 89
73FEXPORT(__bzero) 90FEXPORT(__bzero)
74 sltiu t0, a2, LONGSIZE /* very small region? */ 91 sltiu t0, a2, STORSIZE /* very small region? */
75 bnez t0, .Lsmall_memset 92 bnez t0, .Lsmall_memset
76 andi t0, a0, LONGMASK /* aligned? */ 93 andi t0, a0, STORMASK /* aligned? */
77 94
95#ifdef CONFIG_CPU_MICROMIPS
96 move t8, a1 /* used by 'swp' instruction */
97 move t9, a1
98#endif
78#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 99#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
79 beqz t0, 1f 100 beqz t0, 1f
80 PTR_SUBU t0, LONGSIZE /* alignment in bytes */ 101 PTR_SUBU t0, STORSIZE /* alignment in bytes */
81#else 102#else
82 .set noat 103 .set noat
83 li AT, LONGSIZE 104 li AT, STORSIZE
84 beqz t0, 1f 105 beqz t0, 1f
85 PTR_SUBU t0, AT /* alignment in bytes */ 106 PTR_SUBU t0, AT /* alignment in bytes */
86 .set at 107 .set at
@@ -99,24 +120,27 @@ FEXPORT(__bzero)
991: ori t1, a2, 0x3f /* # of full blocks */ 1201: ori t1, a2, 0x3f /* # of full blocks */
100 xori t1, 0x3f 121 xori t1, 0x3f
101 beqz t1, .Lmemset_partial /* no block to fill */ 122 beqz t1, .Lmemset_partial /* no block to fill */
102 andi t0, a2, 0x40-LONGSIZE 123 andi t0, a2, 0x40-STORSIZE
103 124
104 PTR_ADDU t1, a0 /* end address */ 125 PTR_ADDU t1, a0 /* end address */
105 .set reorder 126 .set reorder
1061: PTR_ADDIU a0, 64 1271: PTR_ADDIU a0, 64
107 R10KCBARRIER(0(ra)) 128 R10KCBARRIER(0(ra))
108 f_fill64 a0, -64, a1, .Lfwd_fixup 129 f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
109 bne t1, a0, 1b 130 bne t1, a0, 1b
110 .set noreorder 131 .set noreorder
111 132
112.Lmemset_partial: 133.Lmemset_partial:
113 R10KCBARRIER(0(ra)) 134 R10KCBARRIER(0(ra))
114 PTR_LA t1, 2f /* where to start */ 135 PTR_LA t1, 2f /* where to start */
136#ifdef CONFIG_CPU_MICROMIPS
137 LONG_SRL t7, t0, 1
138#endif
115#if LONGSIZE == 4 139#if LONGSIZE == 4
116 PTR_SUBU t1, t0 140 PTR_SUBU t1, FILLPTRG
117#else 141#else
118 .set noat 142 .set noat
119 LONG_SRL AT, t0, 1 143 LONG_SRL AT, FILLPTRG, 1
120 PTR_SUBU t1, AT 144 PTR_SUBU t1, AT
121 .set at 145 .set at
122#endif 146#endif
@@ -126,9 +150,9 @@ FEXPORT(__bzero)
126 .set push 150 .set push
127 .set noreorder 151 .set noreorder
128 .set nomacro 152 .set nomacro
129 f_fill64 a0, -64, a1, .Lpartial_fixup /* ... but first do longs ... */ 153 f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */
1302: .set pop 1542: .set pop
131 andi a2, LONGMASK /* At most one long to go */ 155 andi a2, STORMASK /* At most one long to go */
132 156
133 beqz a2, 1f 157 beqz a2, 1f
134 PTR_ADDU a0, a2 /* What's left */ 158 PTR_ADDU a0, a2 /* What's left */
@@ -169,7 +193,7 @@ FEXPORT(__bzero)
169 193
170.Lpartial_fixup: 194.Lpartial_fixup:
171 PTR_L t0, TI_TASK($28) 195 PTR_L t0, TI_TASK($28)
172 andi a2, LONGMASK 196 andi a2, STORMASK
173 LONG_L t0, THREAD_BUADDR(t0) 197 LONG_L t0, THREAD_BUADDR(t0)
174 LONG_ADDU a2, t1 198 LONG_ADDU a2, t1
175 jr ra 199 jr ra
@@ -177,4 +201,4 @@ FEXPORT(__bzero)
177 201
178.Llast_fixup: 202.Llast_fixup:
179 jr ra 203 jr ra
180 andi v1, a2, LONGMASK 204 andi v1, a2, STORMASK
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index cd160be3ce4d..6807f7172eaf 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -13,6 +13,7 @@
13#include <linux/compiler.h> 13#include <linux/compiler.h>
14#include <linux/preempt.h> 14#include <linux/preempt.h>
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/stringify.h>
16 17
17#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) 18#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
18 19
@@ -34,8 +35,11 @@
34 * 35 *
35 * Workaround: mask EXL bit of the result or place a nop before mfc0. 36 * Workaround: mask EXL bit of the result or place a nop before mfc0.
36 */ 37 */
37__asm__( 38notrace void arch_local_irq_disable(void)
38 " .macro arch_local_irq_disable\n" 39{
40 preempt_disable();
41
42 __asm__ __volatile__(
39 " .set push \n" 43 " .set push \n"
40 " .set noat \n" 44 " .set noat \n"
41#ifdef CONFIG_MIPS_MT_SMTC 45#ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
52 " .set noreorder \n" 56 " .set noreorder \n"
53 " mtc0 $1,$12 \n" 57 " mtc0 $1,$12 \n"
54#endif 58#endif
55 " irq_disable_hazard \n" 59 " " __stringify(__irq_disable_hazard) " \n"
56 " .set pop \n" 60 " .set pop \n"
57 " .endm \n"); 61 : /* no outputs */
62 : /* no inputs */
63 : "memory");
58 64
59notrace void arch_local_irq_disable(void)
60{
61 preempt_disable();
62 __asm__ __volatile__(
63 "arch_local_irq_disable"
64 : /* no outputs */
65 : /* no inputs */
66 : "memory");
67 preempt_enable(); 65 preempt_enable();
68} 66}
69EXPORT_SYMBOL(arch_local_irq_disable); 67EXPORT_SYMBOL(arch_local_irq_disable);
70 68
71 69
72__asm__( 70notrace unsigned long arch_local_irq_save(void)
73 " .macro arch_local_irq_save result \n" 71{
72 unsigned long flags;
73
74 preempt_disable();
75
76 __asm__ __volatile__(
74 " .set push \n" 77 " .set push \n"
75 " .set reorder \n" 78 " .set reorder \n"
76 " .set noat \n" 79 " .set noat \n"
77#ifdef CONFIG_MIPS_MT_SMTC 80#ifdef CONFIG_MIPS_MT_SMTC
78 " mfc0 \\result, $2, 1 \n" 81 " mfc0 %[flags], $2, 1 \n"
79 " ori $1, \\result, 0x400 \n" 82 " ori $1, %[flags], 0x400 \n"
80 " .set noreorder \n" 83 " .set noreorder \n"
81 " mtc0 $1, $2, 1 \n" 84 " mtc0 $1, $2, 1 \n"
82 " andi \\result, \\result, 0x400 \n" 85 " andi %[flags], %[flags], 0x400 \n"
83#elif defined(CONFIG_CPU_MIPSR2) 86#elif defined(CONFIG_CPU_MIPSR2)
84 /* see irqflags.h for inline function */ 87 /* see irqflags.h for inline function */
85#else 88#else
86 " mfc0 \\result, $12 \n" 89 " mfc0 %[flags], $12 \n"
87 " ori $1, \\result, 0x1f \n" 90 " ori $1, %[flags], 0x1f \n"
88 " xori $1, 0x1f \n" 91 " xori $1, 0x1f \n"
89 " .set noreorder \n" 92 " .set noreorder \n"
90 " mtc0 $1, $12 \n" 93 " mtc0 $1, $12 \n"
91#endif 94#endif
92 " irq_disable_hazard \n" 95 " " __stringify(__irq_disable_hazard) " \n"
93 " .set pop \n" 96 " .set pop \n"
94 " .endm \n"); 97 : [flags] "=r" (flags)
98 : /* no inputs */
99 : "memory");
95 100
96notrace unsigned long arch_local_irq_save(void)
97{
98 unsigned long flags;
99 preempt_disable();
100 asm volatile("arch_local_irq_save\t%0"
101 : "=r" (flags)
102 : /* no inputs */
103 : "memory");
104 preempt_enable(); 101 preempt_enable();
102
105 return flags; 103 return flags;
106} 104}
107EXPORT_SYMBOL(arch_local_irq_save); 105EXPORT_SYMBOL(arch_local_irq_save);
108 106
107notrace void arch_local_irq_restore(unsigned long flags)
108{
109 unsigned long __tmp1;
110
111#ifdef CONFIG_MIPS_MT_SMTC
112 /*
113 * SMTC kernel needs to do a software replay of queued
114 * IPIs, at the cost of branch and call overhead on each
115 * local_irq_restore()
116 */
117 if (unlikely(!(flags & 0x0400)))
118 smtc_ipi_replay();
119#endif
120 preempt_disable();
109 121
110__asm__( 122 __asm__ __volatile__(
111 " .macro arch_local_irq_restore flags \n"
112 " .set push \n" 123 " .set push \n"
113 " .set noreorder \n" 124 " .set noreorder \n"
114 " .set noat \n" 125 " .set noat \n"
115#ifdef CONFIG_MIPS_MT_SMTC 126#ifdef CONFIG_MIPS_MT_SMTC
116 "mfc0 $1, $2, 1 \n" 127 " mfc0 $1, $2, 1 \n"
117 "andi \\flags, 0x400 \n" 128 " andi %[flags], 0x400 \n"
118 "ori $1, 0x400 \n" 129 " ori $1, 0x400 \n"
119 "xori $1, 0x400 \n" 130 " xori $1, 0x400 \n"
120 "or \\flags, $1 \n" 131 " or %[flags], $1 \n"
121 "mtc0 \\flags, $2, 1 \n" 132 " mtc0 %[flags], $2, 1 \n"
122#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) 133#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
123 /* see irqflags.h for inline function */ 134 /* see irqflags.h for inline function */
124#elif defined(CONFIG_CPU_MIPSR2) 135#elif defined(CONFIG_CPU_MIPSR2)
125 /* see irqflags.h for inline function */ 136 /* see irqflags.h for inline function */
126#else 137#else
127 " mfc0 $1, $12 \n" 138 " mfc0 $1, $12 \n"
128 " andi \\flags, 1 \n" 139 " andi %[flags], 1 \n"
129 " ori $1, 0x1f \n" 140 " ori $1, 0x1f \n"
130 " xori $1, 0x1f \n" 141 " xori $1, 0x1f \n"
131 " or \\flags, $1 \n" 142 " or %[flags], $1 \n"
132 " mtc0 \\flags, $12 \n" 143 " mtc0 %[flags], $12 \n"
133#endif 144#endif
134 " irq_disable_hazard \n" 145 " " __stringify(__irq_disable_hazard) " \n"
135 " .set pop \n" 146 " .set pop \n"
136 " .endm \n"); 147 : [flags] "=r" (__tmp1)
148 : "0" (flags)
149 : "memory");
137 150
138notrace void arch_local_irq_restore(unsigned long flags)
139{
140 unsigned long __tmp1;
141
142#ifdef CONFIG_MIPS_MT_SMTC
143 /*
144 * SMTC kernel needs to do a software replay of queued
145 * IPIs, at the cost of branch and call overhead on each
146 * local_irq_restore()
147 */
148 if (unlikely(!(flags & 0x0400)))
149 smtc_ipi_replay();
150#endif
151 preempt_disable();
152 __asm__ __volatile__(
153 "arch_local_irq_restore\t%0"
154 : "=r" (__tmp1)
155 : "0" (flags)
156 : "memory");
157 preempt_enable(); 151 preempt_enable();
158} 152}
159EXPORT_SYMBOL(arch_local_irq_restore); 153EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
164 unsigned long __tmp1; 158 unsigned long __tmp1;
165 159
166 preempt_disable(); 160 preempt_disable();
161
167 __asm__ __volatile__( 162 __asm__ __volatile__(
168 "arch_local_irq_restore\t%0" 163 " .set push \n"
169 : "=r" (__tmp1) 164 " .set noreorder \n"
170 : "0" (flags) 165 " .set noat \n"
171 : "memory"); 166#ifdef CONFIG_MIPS_MT_SMTC
167 " mfc0 $1, $2, 1 \n"
168 " andi %[flags], 0x400 \n"
169 " ori $1, 0x400 \n"
170 " xori $1, 0x400 \n"
171 " or %[flags], $1 \n"
172 " mtc0 %[flags], $2, 1 \n"
173#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
174 /* see irqflags.h for inline function */
175#elif defined(CONFIG_CPU_MIPSR2)
176 /* see irqflags.h for inline function */
177#else
178 " mfc0 $1, $12 \n"
179 " andi %[flags], 1 \n"
180 " ori $1, 0x1f \n"
181 " xori $1, 0x1f \n"
182 " or %[flags], $1 \n"
183 " mtc0 %[flags], $12 \n"
184#endif
185 " " __stringify(__irq_disable_hazard) " \n"
186 " .set pop \n"
187 : [flags] "=r" (__tmp1)
188 : "0" (flags)
189 : "memory");
190
172 preempt_enable(); 191 preempt_enable();
173} 192}
174EXPORT_SYMBOL(__arch_local_irq_restore); 193EXPORT_SYMBOL(__arch_local_irq_restore);
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 91615c2ef0cf..8327698b9937 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,6 +9,7 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10 10
11#include <asm/mipsregs.h> 11#include <asm/mipsregs.h>
12#include <asm/mmu_context.h>
12#include <asm/page.h> 13#include <asm/page.h>
13#include <asm/pgtable.h> 14#include <asm/pgtable.h>
14#include <asm/tlbdebug.h> 15#include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@ static void dump_tlb(int first, int last)
21 unsigned int asid; 22 unsigned int asid;
22 unsigned long entryhi, entrylo0; 23 unsigned long entryhi, entrylo0;
23 24
24 asid = read_c0_entryhi() & 0xfc0; 25 asid = ASID_MASK(read_c0_entryhi());
25 26
26 for (i = first; i <= last; i++) { 27 for (i = first; i <= last; i++) {
27 write_c0_index(i<<8); 28 write_c0_index(i<<8);
@@ -35,7 +36,7 @@ static void dump_tlb(int first, int last)
35 36
36 /* Unused entries have a virtual address of KSEG0. */ 37 /* Unused entries have a virtual address of KSEG0. */
37 if ((entryhi & 0xffffe000) != 0x80000000 38 if ((entryhi & 0xffffe000) != 0x80000000
38 && (entryhi & 0xfc0) == asid) { 39 && (ASID_MASK(entryhi) == asid)) {
39 /* 40 /*
40 * Only print entries in use 41 * Only print entries in use
41 */ 42 */
@@ -44,7 +45,7 @@ static void dump_tlb(int first, int last)
44 printk("va=%08lx asid=%08lx" 45 printk("va=%08lx asid=%08lx"
45 " [pa=%06lx n=%d d=%d v=%d g=%d]", 46 " [pa=%06lx n=%d d=%d v=%d g=%d]",
46 (entryhi & 0xffffe000), 47 (entryhi & 0xffffe000),
47 entryhi & 0xfc0, 48 ASID_MASK(entryhi),
48 entrylo0 & PAGE_MASK, 49 entrylo0 & PAGE_MASK,
49 (entrylo0 & (1 << 11)) ? 1 : 0, 50 (entrylo0 & (1 << 11)) ? 1 : 0,
50 (entrylo0 & (1 << 10)) ? 1 : 0, 51 (entrylo0 & (1 << 10)) ? 1 : 0,
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index fdbb970f670d..e362dcdc69d1 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -3,8 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle 6 * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
7 * Copyright (c) 1999 Silicon Graphics, Inc. 7 * Copyright (C) 1999 Silicon Graphics, Inc.
8 * Copyright (C) 2011 MIPS Technologies, Inc.
8 */ 9 */
9#include <asm/asm.h> 10#include <asm/asm.h>
10#include <asm/asm-offsets.h> 11#include <asm/asm-offsets.h>
@@ -28,9 +29,9 @@ LEAF(__strlen_user_asm)
28 29
29FEXPORT(__strlen_user_nocheck_asm) 30FEXPORT(__strlen_user_nocheck_asm)
30 move v0, a0 31 move v0, a0
311: EX(lb, t0, (v0), .Lfault) 321: EX(lbu, v1, (v0), .Lfault)
32 PTR_ADDIU v0, 1 33 PTR_ADDIU v0, 1
33 bnez t0, 1b 34 bnez v1, 1b
34 PTR_SUBU v0, a0 35 PTR_SUBU v0, a0
35 jr ra 36 jr ra
36 END(__strlen_user_asm) 37 END(__strlen_user_asm)
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index bad539487503..92870b6b53ea 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -3,7 +3,8 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 1996, 1999 by Ralf Baechle 6 * Copyright (C) 1996, 1999 by Ralf Baechle
7 * Copyright (C) 2011 MIPS Technologies, Inc.
7 */ 8 */
8#include <linux/errno.h> 9#include <linux/errno.h>
9#include <asm/asm.h> 10#include <asm/asm.h>
@@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm)
33 bnez v0, .Lfault 34 bnez v0, .Lfault
34 35
35FEXPORT(__strncpy_from_user_nocheck_asm) 36FEXPORT(__strncpy_from_user_nocheck_asm)
36 move v0, zero
37 move v1, a1
38 .set noreorder 37 .set noreorder
391: EX(lbu, t0, (v1), .Lfault) 38 move t0, zero
39 move v1, a1
401: EX(lbu, v0, (v1), .Lfault)
40 PTR_ADDIU v1, 1 41 PTR_ADDIU v1, 1
41 R10KCBARRIER(0(ra)) 42 R10KCBARRIER(0(ra))
42 beqz t0, 2f 43 beqz v0, 2f
43 sb t0, (a0) 44 sb v0, (a0)
44 PTR_ADDIU v0, 1 45 PTR_ADDIU t0, 1
45 .set reorder 46 bne t0, a2, 1b
46 PTR_ADDIU a0, 1 47 PTR_ADDIU a0, 1
47 bne v0, a2, 1b 482: PTR_ADDU v0, a1, t0
482: PTR_ADDU t0, a1, v0 49 xor v0, a1
49 xor t0, a1 50 bltz v0, .Lfault
50 bltz t0, .Lfault 51 nop
51 jr ra # return n 52 jr ra # return n
53 move v0, t0
52 END(__strncpy_from_user_asm) 54 END(__strncpy_from_user_asm)
53 55
54.Lfault: li v0, -EFAULT 56.Lfault: jr ra
55 jr ra 57 li v0, -EFAULT
56 58
57 .section __ex_table,"a" 59 .section __ex_table,"a"
58 PTR 1b, .Lfault 60 PTR 1b, .Lfault
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index beea03c8c0ce..fcacea5e61f1 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm)
35 PTR_ADDU a1, a0 # stop pointer 35 PTR_ADDU a1, a0 # stop pointer
361: beq v0, a1, 1f # limit reached? 361: beq v0, a1, 1f # limit reached?
37 EX(lb, t0, (v0), .Lfault) 37 EX(lb, t0, (v0), .Lfault)
38 PTR_ADDU v0, 1 38 PTR_ADDIU v0, 1
39 bnez t0, 1b 39 bnez t0, 1b
401: PTR_SUBU v0, a0 401: PTR_SUBU v0, a0
41 jr ra 41 jr ra
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index afb5a0bcf7a5..f03771900813 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -45,6 +45,7 @@
45#include <asm/signal.h> 45#include <asm/signal.h>
46#include <asm/mipsregs.h> 46#include <asm/mipsregs.h>
47#include <asm/fpu_emulator.h> 47#include <asm/fpu_emulator.h>
48#include <asm/fpu.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49#include <asm/branch.h> 50#include <asm/branch.h>
50 51
@@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
81/* Determine rounding mode from the RM bits of the FCSR */ 82/* Determine rounding mode from the RM bits of the FCSR */
82#define modeindex(v) ((v) & FPU_CSR_RM) 83#define modeindex(v) ((v) & FPU_CSR_RM)
83 84
85/* microMIPS bitfields */
86#define MM_POOL32A_MINOR_MASK 0x3f
87#define MM_POOL32A_MINOR_SHIFT 0x6
88#define MM_MIPS32_COND_FC 0x30
89
84/* Convert Mips rounding mode (0..3) to IEEE library modes. */ 90/* Convert Mips rounding mode (0..3) to IEEE library modes. */
85static const unsigned char ieee_rm[4] = { 91static const unsigned char ieee_rm[4] = {
86 [FPU_CSR_RN] = IEEE754_RN, 92 [FPU_CSR_RN] = IEEE754_RN,
@@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = {
110}; 116};
111#endif 117#endif
112 118
119/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
120static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
121
122/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
123static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
124static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
125static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
126static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
127
128/*
129 * This functions translates a 32-bit microMIPS instruction
130 * into a 32-bit MIPS32 instruction. Returns 0 on success
131 * and SIGILL otherwise.
132 */
133static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
134{
135 union mips_instruction insn = *insn_ptr;
136 union mips_instruction mips32_insn = insn;
137 int func, fmt, op;
138
139 switch (insn.mm_i_format.opcode) {
140 case mm_ldc132_op:
141 mips32_insn.mm_i_format.opcode = ldc1_op;
142 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
143 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
144 break;
145 case mm_lwc132_op:
146 mips32_insn.mm_i_format.opcode = lwc1_op;
147 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
148 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
149 break;
150 case mm_sdc132_op:
151 mips32_insn.mm_i_format.opcode = sdc1_op;
152 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
153 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
154 break;
155 case mm_swc132_op:
156 mips32_insn.mm_i_format.opcode = swc1_op;
157 mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
158 mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
159 break;
160 case mm_pool32i_op:
161 /* NOTE: offset is << by 1 if in microMIPS mode. */
162 if ((insn.mm_i_format.rt == mm_bc1f_op) ||
163 (insn.mm_i_format.rt == mm_bc1t_op)) {
164 mips32_insn.fb_format.opcode = cop1_op;
165 mips32_insn.fb_format.bc = bc_op;
166 mips32_insn.fb_format.flag =
167 (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
168 } else
169 return SIGILL;
170 break;
171 case mm_pool32f_op:
172 switch (insn.mm_fp0_format.func) {
173 case mm_32f_01_op:
174 case mm_32f_11_op:
175 case mm_32f_02_op:
176 case mm_32f_12_op:
177 case mm_32f_41_op:
178 case mm_32f_51_op:
179 case mm_32f_42_op:
180 case mm_32f_52_op:
181 op = insn.mm_fp0_format.func;
182 if (op == mm_32f_01_op)
183 func = madd_s_op;
184 else if (op == mm_32f_11_op)
185 func = madd_d_op;
186 else if (op == mm_32f_02_op)
187 func = nmadd_s_op;
188 else if (op == mm_32f_12_op)
189 func = nmadd_d_op;
190 else if (op == mm_32f_41_op)
191 func = msub_s_op;
192 else if (op == mm_32f_51_op)
193 func = msub_d_op;
194 else if (op == mm_32f_42_op)
195 func = nmsub_s_op;
196 else
197 func = nmsub_d_op;
198 mips32_insn.fp6_format.opcode = cop1x_op;
199 mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
200 mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
201 mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
202 mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
203 mips32_insn.fp6_format.func = func;
204 break;
205 case mm_32f_10_op:
206 func = -1; /* Invalid */
207 op = insn.mm_fp5_format.op & 0x7;
208 if (op == mm_ldxc1_op)
209 func = ldxc1_op;
210 else if (op == mm_sdxc1_op)
211 func = sdxc1_op;
212 else if (op == mm_lwxc1_op)
213 func = lwxc1_op;
214 else if (op == mm_swxc1_op)
215 func = swxc1_op;
216
217 if (func != -1) {
218 mips32_insn.r_format.opcode = cop1x_op;
219 mips32_insn.r_format.rs =
220 insn.mm_fp5_format.base;
221 mips32_insn.r_format.rt =
222 insn.mm_fp5_format.index;
223 mips32_insn.r_format.rd = 0;
224 mips32_insn.r_format.re = insn.mm_fp5_format.fd;
225 mips32_insn.r_format.func = func;
226 } else
227 return SIGILL;
228 break;
229 case mm_32f_40_op:
230 op = -1; /* Invalid */
231 if (insn.mm_fp2_format.op == mm_fmovt_op)
232 op = 1;
233 else if (insn.mm_fp2_format.op == mm_fmovf_op)
234 op = 0;
235 if (op != -1) {
236 mips32_insn.fp0_format.opcode = cop1_op;
237 mips32_insn.fp0_format.fmt =
238 sdps_format[insn.mm_fp2_format.fmt];
239 mips32_insn.fp0_format.ft =
240 (insn.mm_fp2_format.cc<<2) + op;
241 mips32_insn.fp0_format.fs =
242 insn.mm_fp2_format.fs;
243 mips32_insn.fp0_format.fd =
244 insn.mm_fp2_format.fd;
245 mips32_insn.fp0_format.func = fmovc_op;
246 } else
247 return SIGILL;
248 break;
249 case mm_32f_60_op:
250 func = -1; /* Invalid */
251 if (insn.mm_fp0_format.op == mm_fadd_op)
252 func = fadd_op;
253 else if (insn.mm_fp0_format.op == mm_fsub_op)
254 func = fsub_op;
255 else if (insn.mm_fp0_format.op == mm_fmul_op)
256 func = fmul_op;
257 else if (insn.mm_fp0_format.op == mm_fdiv_op)
258 func = fdiv_op;
259 if (func != -1) {
260 mips32_insn.fp0_format.opcode = cop1_op;
261 mips32_insn.fp0_format.fmt =
262 sdps_format[insn.mm_fp0_format.fmt];
263 mips32_insn.fp0_format.ft =
264 insn.mm_fp0_format.ft;
265 mips32_insn.fp0_format.fs =
266 insn.mm_fp0_format.fs;
267 mips32_insn.fp0_format.fd =
268 insn.mm_fp0_format.fd;
269 mips32_insn.fp0_format.func = func;
270 } else
271 return SIGILL;
272 break;
273 case mm_32f_70_op:
274 func = -1; /* Invalid */
275 if (insn.mm_fp0_format.op == mm_fmovn_op)
276 func = fmovn_op;
277 else if (insn.mm_fp0_format.op == mm_fmovz_op)
278 func = fmovz_op;
279 if (func != -1) {
280 mips32_insn.fp0_format.opcode = cop1_op;
281 mips32_insn.fp0_format.fmt =
282 sdps_format[insn.mm_fp0_format.fmt];
283 mips32_insn.fp0_format.ft =
284 insn.mm_fp0_format.ft;
285 mips32_insn.fp0_format.fs =
286 insn.mm_fp0_format.fs;
287 mips32_insn.fp0_format.fd =
288 insn.mm_fp0_format.fd;
289 mips32_insn.fp0_format.func = func;
290 } else
291 return SIGILL;
292 break;
293 case mm_32f_73_op: /* POOL32FXF */
294 switch (insn.mm_fp1_format.op) {
295 case mm_movf0_op:
296 case mm_movf1_op:
297 case mm_movt0_op:
298 case mm_movt1_op:
299 if ((insn.mm_fp1_format.op & 0x7f) ==
300 mm_movf0_op)
301 op = 0;
302 else
303 op = 1;
304 mips32_insn.r_format.opcode = spec_op;
305 mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
306 mips32_insn.r_format.rt =
307 (insn.mm_fp4_format.cc << 2) + op;
308 mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
309 mips32_insn.r_format.re = 0;
310 mips32_insn.r_format.func = movc_op;
311 break;
312 case mm_fcvtd0_op:
313 case mm_fcvtd1_op:
314 case mm_fcvts0_op:
315 case mm_fcvts1_op:
316 if ((insn.mm_fp1_format.op & 0x7f) ==
317 mm_fcvtd0_op) {
318 func = fcvtd_op;
319 fmt = swl_format[insn.mm_fp3_format.fmt];
320 } else {
321 func = fcvts_op;
322 fmt = dwl_format[insn.mm_fp3_format.fmt];
323 }
324 mips32_insn.fp0_format.opcode = cop1_op;
325 mips32_insn.fp0_format.fmt = fmt;
326 mips32_insn.fp0_format.ft = 0;
327 mips32_insn.fp0_format.fs =
328 insn.mm_fp3_format.fs;
329 mips32_insn.fp0_format.fd =
330 insn.mm_fp3_format.rt;
331 mips32_insn.fp0_format.func = func;
332 break;
333 case mm_fmov0_op:
334 case mm_fmov1_op:
335 case mm_fabs0_op:
336 case mm_fabs1_op:
337 case mm_fneg0_op:
338 case mm_fneg1_op:
339 if ((insn.mm_fp1_format.op & 0x7f) ==
340 mm_fmov0_op)
341 func = fmov_op;
342 else if ((insn.mm_fp1_format.op & 0x7f) ==
343 mm_fabs0_op)
344 func = fabs_op;
345 else
346 func = fneg_op;
347 mips32_insn.fp0_format.opcode = cop1_op;
348 mips32_insn.fp0_format.fmt =
349 sdps_format[insn.mm_fp3_format.fmt];
350 mips32_insn.fp0_format.ft = 0;
351 mips32_insn.fp0_format.fs =
352 insn.mm_fp3_format.fs;
353 mips32_insn.fp0_format.fd =
354 insn.mm_fp3_format.rt;
355 mips32_insn.fp0_format.func = func;
356 break;
357 case mm_ffloorl_op:
358 case mm_ffloorw_op:
359 case mm_fceill_op:
360 case mm_fceilw_op:
361 case mm_ftruncl_op:
362 case mm_ftruncw_op:
363 case mm_froundl_op:
364 case mm_froundw_op:
365 case mm_fcvtl_op:
366 case mm_fcvtw_op:
367 if (insn.mm_fp1_format.op == mm_ffloorl_op)
368 func = ffloorl_op;
369 else if (insn.mm_fp1_format.op == mm_ffloorw_op)
370 func = ffloor_op;
371 else if (insn.mm_fp1_format.op == mm_fceill_op)
372 func = fceill_op;
373 else if (insn.mm_fp1_format.op == mm_fceilw_op)
374 func = fceil_op;
375 else if (insn.mm_fp1_format.op == mm_ftruncl_op)
376 func = ftruncl_op;
377 else if (insn.mm_fp1_format.op == mm_ftruncw_op)
378 func = ftrunc_op;
379 else if (insn.mm_fp1_format.op == mm_froundl_op)
380 func = froundl_op;
381 else if (insn.mm_fp1_format.op == mm_froundw_op)
382 func = fround_op;
383 else if (insn.mm_fp1_format.op == mm_fcvtl_op)
384 func = fcvtl_op;
385 else
386 func = fcvtw_op;
387 mips32_insn.fp0_format.opcode = cop1_op;
388 mips32_insn.fp0_format.fmt =
389 sd_format[insn.mm_fp1_format.fmt];
390 mips32_insn.fp0_format.ft = 0;
391 mips32_insn.fp0_format.fs =
392 insn.mm_fp1_format.fs;
393 mips32_insn.fp0_format.fd =
394 insn.mm_fp1_format.rt;
395 mips32_insn.fp0_format.func = func;
396 break;
397 case mm_frsqrt_op:
398 case mm_fsqrt_op:
399 case mm_frecip_op:
400 if (insn.mm_fp1_format.op == mm_frsqrt_op)
401 func = frsqrt_op;
402 else if (insn.mm_fp1_format.op == mm_fsqrt_op)
403 func = fsqrt_op;
404 else
405 func = frecip_op;
406 mips32_insn.fp0_format.opcode = cop1_op;
407 mips32_insn.fp0_format.fmt =
408 sdps_format[insn.mm_fp1_format.fmt];
409 mips32_insn.fp0_format.ft = 0;
410 mips32_insn.fp0_format.fs =
411 insn.mm_fp1_format.fs;
412 mips32_insn.fp0_format.fd =
413 insn.mm_fp1_format.rt;
414 mips32_insn.fp0_format.func = func;
415 break;
416 case mm_mfc1_op:
417 case mm_mtc1_op:
418 case mm_cfc1_op:
419 case mm_ctc1_op:
420 if (insn.mm_fp1_format.op == mm_mfc1_op)
421 op = mfc_op;
422 else if (insn.mm_fp1_format.op == mm_mtc1_op)
423 op = mtc_op;
424 else if (insn.mm_fp1_format.op == mm_cfc1_op)
425 op = cfc_op;
426 else
427 op = ctc_op;
428 mips32_insn.fp1_format.opcode = cop1_op;
429 mips32_insn.fp1_format.op = op;
430 mips32_insn.fp1_format.rt =
431 insn.mm_fp1_format.rt;
432 mips32_insn.fp1_format.fs =
433 insn.mm_fp1_format.fs;
434 mips32_insn.fp1_format.fd = 0;
435 mips32_insn.fp1_format.func = 0;
436 break;
437 default:
438 return SIGILL;
439 break;
440 }
441 break;
442 case mm_32f_74_op: /* c.cond.fmt */
443 mips32_insn.fp0_format.opcode = cop1_op;
444 mips32_insn.fp0_format.fmt =
445 sdps_format[insn.mm_fp4_format.fmt];
446 mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
447 mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
448 mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
449 mips32_insn.fp0_format.func =
450 insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
451 break;
452 default:
453 return SIGILL;
454 break;
455 }
456 break;
457 default:
458 return SIGILL;
459 break;
460 }
461
462 *insn_ptr = mips32_insn;
463 return 0;
464}
465
466int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
467 unsigned long *contpc)
468{
469 union mips_instruction insn = (union mips_instruction)dec_insn.insn;
470 int bc_false = 0;
471 unsigned int fcr31;
472 unsigned int bit;
473
474 switch (insn.mm_i_format.opcode) {
475 case mm_pool32a_op:
476 if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
477 mm_pool32axf_op) {
478 switch (insn.mm_i_format.simmediate >>
479 MM_POOL32A_MINOR_SHIFT) {
480 case mm_jalr_op:
481 case mm_jalrhb_op:
482 case mm_jalrs_op:
483 case mm_jalrshb_op:
484 if (insn.mm_i_format.rt != 0) /* Not mm_jr */
485 regs->regs[insn.mm_i_format.rt] =
486 regs->cp0_epc +
487 dec_insn.pc_inc +
488 dec_insn.next_pc_inc;
489 *contpc = regs->regs[insn.mm_i_format.rs];
490 return 1;
491 break;
492 }
493 }
494 break;
495 case mm_pool32i_op:
496 switch (insn.mm_i_format.rt) {
497 case mm_bltzals_op:
498 case mm_bltzal_op:
499 regs->regs[31] = regs->cp0_epc +
500 dec_insn.pc_inc +
501 dec_insn.next_pc_inc;
502 /* Fall through */
503 case mm_bltz_op:
504 if ((long)regs->regs[insn.mm_i_format.rs] < 0)
505 *contpc = regs->cp0_epc +
506 dec_insn.pc_inc +
507 (insn.mm_i_format.simmediate << 1);
508 else
509 *contpc = regs->cp0_epc +
510 dec_insn.pc_inc +
511 dec_insn.next_pc_inc;
512 return 1;
513 break;
514 case mm_bgezals_op:
515 case mm_bgezal_op:
516 regs->regs[31] = regs->cp0_epc +
517 dec_insn.pc_inc +
518 dec_insn.next_pc_inc;
519 /* Fall through */
520 case mm_bgez_op:
521 if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
522 *contpc = regs->cp0_epc +
523 dec_insn.pc_inc +
524 (insn.mm_i_format.simmediate << 1);
525 else
526 *contpc = regs->cp0_epc +
527 dec_insn.pc_inc +
528 dec_insn.next_pc_inc;
529 return 1;
530 break;
531 case mm_blez_op:
532 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
533 *contpc = regs->cp0_epc +
534 dec_insn.pc_inc +
535 (insn.mm_i_format.simmediate << 1);
536 else
537 *contpc = regs->cp0_epc +
538 dec_insn.pc_inc +
539 dec_insn.next_pc_inc;
540 return 1;
541 break;
542 case mm_bgtz_op:
543 if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
544 *contpc = regs->cp0_epc +
545 dec_insn.pc_inc +
546 (insn.mm_i_format.simmediate << 1);
547 else
548 *contpc = regs->cp0_epc +
549 dec_insn.pc_inc +
550 dec_insn.next_pc_inc;
551 return 1;
552 break;
553 case mm_bc2f_op:
554 case mm_bc1f_op:
555 bc_false = 1;
556 /* Fall through */
557 case mm_bc2t_op:
558 case mm_bc1t_op:
559 preempt_disable();
560 if (is_fpu_owner())
561 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
562 else
563 fcr31 = current->thread.fpu.fcr31;
564 preempt_enable();
565
566 if (bc_false)
567 fcr31 = ~fcr31;
568
569 bit = (insn.mm_i_format.rs >> 2);
570 bit += (bit != 0);
571 bit += 23;
572 if (fcr31 & (1 << bit))
573 *contpc = regs->cp0_epc +
574 dec_insn.pc_inc +
575 (insn.mm_i_format.simmediate << 1);
576 else
577 *contpc = regs->cp0_epc +
578 dec_insn.pc_inc + dec_insn.next_pc_inc;
579 return 1;
580 break;
581 }
582 break;
583 case mm_pool16c_op:
584 switch (insn.mm_i_format.rt) {
585 case mm_jalr16_op:
586 case mm_jalrs16_op:
587 regs->regs[31] = regs->cp0_epc +
588 dec_insn.pc_inc + dec_insn.next_pc_inc;
589 /* Fall through */
590 case mm_jr16_op:
591 *contpc = regs->regs[insn.mm_i_format.rs];
592 return 1;
593 break;
594 }
595 break;
596 case mm_beqz16_op:
597 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
598 *contpc = regs->cp0_epc +
599 dec_insn.pc_inc +
600 (insn.mm_b1_format.simmediate << 1);
601 else
602 *contpc = regs->cp0_epc +
603 dec_insn.pc_inc + dec_insn.next_pc_inc;
604 return 1;
605 break;
606 case mm_bnez16_op:
607 if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
608 *contpc = regs->cp0_epc +
609 dec_insn.pc_inc +
610 (insn.mm_b1_format.simmediate << 1);
611 else
612 *contpc = regs->cp0_epc +
613 dec_insn.pc_inc + dec_insn.next_pc_inc;
614 return 1;
615 break;
616 case mm_b16_op:
617 *contpc = regs->cp0_epc + dec_insn.pc_inc +
618 (insn.mm_b0_format.simmediate << 1);
619 return 1;
620 break;
621 case mm_beq32_op:
622 if (regs->regs[insn.mm_i_format.rs] ==
623 regs->regs[insn.mm_i_format.rt])
624 *contpc = regs->cp0_epc +
625 dec_insn.pc_inc +
626 (insn.mm_i_format.simmediate << 1);
627 else
628 *contpc = regs->cp0_epc +
629 dec_insn.pc_inc +
630 dec_insn.next_pc_inc;
631 return 1;
632 break;
633 case mm_bne32_op:
634 if (regs->regs[insn.mm_i_format.rs] !=
635 regs->regs[insn.mm_i_format.rt])
636 *contpc = regs->cp0_epc +
637 dec_insn.pc_inc +
638 (insn.mm_i_format.simmediate << 1);
639 else
640 *contpc = regs->cp0_epc +
641 dec_insn.pc_inc + dec_insn.next_pc_inc;
642 return 1;
643 break;
644 case mm_jalx32_op:
645 regs->regs[31] = regs->cp0_epc +
646 dec_insn.pc_inc + dec_insn.next_pc_inc;
647 *contpc = regs->cp0_epc + dec_insn.pc_inc;
648 *contpc >>= 28;
649 *contpc <<= 28;
650 *contpc |= (insn.j_format.target << 2);
651 return 1;
652 break;
653 case mm_jals32_op:
654 case mm_jal32_op:
655 regs->regs[31] = regs->cp0_epc +
656 dec_insn.pc_inc + dec_insn.next_pc_inc;
657 /* Fall through */
658 case mm_j32_op:
659 *contpc = regs->cp0_epc + dec_insn.pc_inc;
660 *contpc >>= 27;
661 *contpc <<= 27;
662 *contpc |= (insn.j_format.target << 1);
663 set_isa16_mode(*contpc);
664 return 1;
665 break;
666 }
667 return 0;
668}
113 669
114/* 670/*
115 * Redundant with logic already in kernel/branch.c, 671 * Redundant with logic already in kernel/branch.c,
@@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = {
117 * a single subroutine should be used across both 673 * a single subroutine should be used across both
118 * modules. 674 * modules.
119 */ 675 */
120static int isBranchInstr(mips_instruction * i) 676static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
677 unsigned long *contpc)
121{ 678{
122 switch (MIPSInst_OPCODE(*i)) { 679 union mips_instruction insn = (union mips_instruction)dec_insn.insn;
680 unsigned int fcr31;
681 unsigned int bit = 0;
682
683 switch (insn.i_format.opcode) {
123 case spec_op: 684 case spec_op:
124 switch (MIPSInst_FUNC(*i)) { 685 switch (insn.r_format.func) {
125 case jalr_op: 686 case jalr_op:
687 regs->regs[insn.r_format.rd] =
688 regs->cp0_epc + dec_insn.pc_inc +
689 dec_insn.next_pc_inc;
690 /* Fall through */
126 case jr_op: 691 case jr_op:
692 *contpc = regs->regs[insn.r_format.rs];
127 return 1; 693 return 1;
694 break;
128 } 695 }
129 break; 696 break;
130
131 case bcond_op: 697 case bcond_op:
132 switch (MIPSInst_RT(*i)) { 698 switch (insn.i_format.rt) {
699 case bltzal_op:
700 case bltzall_op:
701 regs->regs[31] = regs->cp0_epc +
702 dec_insn.pc_inc +
703 dec_insn.next_pc_inc;
704 /* Fall through */
133 case bltz_op: 705 case bltz_op:
134 case bgez_op:
135 case bltzl_op: 706 case bltzl_op:
136 case bgezl_op: 707 if ((long)regs->regs[insn.i_format.rs] < 0)
137 case bltzal_op: 708 *contpc = regs->cp0_epc +
709 dec_insn.pc_inc +
710 (insn.i_format.simmediate << 2);
711 else
712 *contpc = regs->cp0_epc +
713 dec_insn.pc_inc +
714 dec_insn.next_pc_inc;
715 return 1;
716 break;
138 case bgezal_op: 717 case bgezal_op:
139 case bltzall_op:
140 case bgezall_op: 718 case bgezall_op:
719 regs->regs[31] = regs->cp0_epc +
720 dec_insn.pc_inc +
721 dec_insn.next_pc_inc;
722 /* Fall through */
723 case bgez_op:
724 case bgezl_op:
725 if ((long)regs->regs[insn.i_format.rs] >= 0)
726 *contpc = regs->cp0_epc +
727 dec_insn.pc_inc +
728 (insn.i_format.simmediate << 2);
729 else
730 *contpc = regs->cp0_epc +
731 dec_insn.pc_inc +
732 dec_insn.next_pc_inc;
141 return 1; 733 return 1;
734 break;
142 } 735 }
143 break; 736 break;
144
145 case j_op:
146 case jal_op:
147 case jalx_op: 737 case jalx_op:
738 set_isa16_mode(bit);
739 case jal_op:
740 regs->regs[31] = regs->cp0_epc +
741 dec_insn.pc_inc +
742 dec_insn.next_pc_inc;
743 /* Fall through */
744 case j_op:
745 *contpc = regs->cp0_epc + dec_insn.pc_inc;
746 *contpc >>= 28;
747 *contpc <<= 28;
748 *contpc |= (insn.j_format.target << 2);
749 /* Set microMIPS mode bit: XOR for jalx. */
750 *contpc ^= bit;
751 return 1;
752 break;
148 case beq_op: 753 case beq_op:
149 case bne_op:
150 case blez_op:
151 case bgtz_op:
152 case beql_op: 754 case beql_op:
755 if (regs->regs[insn.i_format.rs] ==
756 regs->regs[insn.i_format.rt])
757 *contpc = regs->cp0_epc +
758 dec_insn.pc_inc +
759 (insn.i_format.simmediate << 2);
760 else
761 *contpc = regs->cp0_epc +
762 dec_insn.pc_inc +
763 dec_insn.next_pc_inc;
764 return 1;
765 break;
766 case bne_op:
153 case bnel_op: 767 case bnel_op:
768 if (regs->regs[insn.i_format.rs] !=
769 regs->regs[insn.i_format.rt])
770 *contpc = regs->cp0_epc +
771 dec_insn.pc_inc +
772 (insn.i_format.simmediate << 2);
773 else
774 *contpc = regs->cp0_epc +
775 dec_insn.pc_inc +
776 dec_insn.next_pc_inc;
777 return 1;
778 break;
779 case blez_op:
154 case blezl_op: 780 case blezl_op:
781 if ((long)regs->regs[insn.i_format.rs] <= 0)
782 *contpc = regs->cp0_epc +
783 dec_insn.pc_inc +
784 (insn.i_format.simmediate << 2);
785 else
786 *contpc = regs->cp0_epc +
787 dec_insn.pc_inc +
788 dec_insn.next_pc_inc;
789 return 1;
790 break;
791 case bgtz_op:
155 case bgtzl_op: 792 case bgtzl_op:
793 if ((long)regs->regs[insn.i_format.rs] > 0)
794 *contpc = regs->cp0_epc +
795 dec_insn.pc_inc +
796 (insn.i_format.simmediate << 2);
797 else
798 *contpc = regs->cp0_epc +
799 dec_insn.pc_inc +
800 dec_insn.next_pc_inc;
156 return 1; 801 return 1;
157 802 break;
158 case cop0_op: 803 case cop0_op:
159 case cop1_op: 804 case cop1_op:
160 case cop2_op: 805 case cop2_op:
161 case cop1x_op: 806 case cop1x_op:
162 if (MIPSInst_RS(*i) == bc_op) 807 if (insn.i_format.rs == bc_op) {
163 return 1; 808 preempt_disable();
809 if (is_fpu_owner())
810 asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
811 else
812 fcr31 = current->thread.fpu.fcr31;
813 preempt_enable();
814
815 bit = (insn.i_format.rt >> 2);
816 bit += (bit != 0);
817 bit += 23;
818 switch (insn.i_format.rt & 3) {
819 case 0: /* bc1f */
820 case 2: /* bc1fl */
821 if (~fcr31 & (1 << bit))
822 *contpc = regs->cp0_epc +
823 dec_insn.pc_inc +
824 (insn.i_format.simmediate << 2);
825 else
826 *contpc = regs->cp0_epc +
827 dec_insn.pc_inc +
828 dec_insn.next_pc_inc;
829 return 1;
830 break;
831 case 1: /* bc1t */
832 case 3: /* bc1tl */
833 if (fcr31 & (1 << bit))
834 *contpc = regs->cp0_epc +
835 dec_insn.pc_inc +
836 (insn.i_format.simmediate << 2);
837 else
838 *contpc = regs->cp0_epc +
839 dec_insn.pc_inc +
840 dec_insn.next_pc_inc;
841 return 1;
842 break;
843 }
844 }
164 break; 845 break;
165 } 846 }
166
167 return 0; 847 return 0;
168} 848}
169 849
@@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
210 */ 890 */
211 891
212static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, 892static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
213 void *__user *fault_addr) 893 struct mm_decoded_insn dec_insn, void *__user *fault_addr)
214{ 894{
215 mips_instruction ir; 895 mips_instruction ir;
216 unsigned long emulpc, contpc; 896 unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
217 unsigned int cond; 897 unsigned int cond;
218 898 int pc_inc;
219 if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
220 MIPS_FPU_EMU_INC_STATS(errors);
221 *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
222 return SIGBUS;
223 }
224 if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
225 MIPS_FPU_EMU_INC_STATS(errors);
226 *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
227 return SIGSEGV;
228 }
229 899
230 /* XXX NEC Vr54xx bug workaround */ 900 /* XXX NEC Vr54xx bug workaround */
231 if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir)) 901 if (xcp->cp0_cause & CAUSEF_BD) {
232 xcp->cp0_cause &= ~CAUSEF_BD; 902 if (dec_insn.micro_mips_mode) {
903 if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
904 xcp->cp0_cause &= ~CAUSEF_BD;
905 } else {
906 if (!isBranchInstr(xcp, dec_insn, &contpc))
907 xcp->cp0_cause &= ~CAUSEF_BD;
908 }
909 }
233 910
234 if (xcp->cp0_cause & CAUSEF_BD) { 911 if (xcp->cp0_cause & CAUSEF_BD) {
235 /* 912 /*
@@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
244 * Linux MIPS branch emulator operates on context, updating the 921 * Linux MIPS branch emulator operates on context, updating the
245 * cp0_epc. 922 * cp0_epc.
246 */ 923 */
247 emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */ 924 ir = dec_insn.next_insn; /* process delay slot instr */
925 pc_inc = dec_insn.next_pc_inc;
926 } else {
927 ir = dec_insn.insn; /* process current instr */
928 pc_inc = dec_insn.pc_inc;
929 }
248 930
249 if (__compute_return_epc(xcp) < 0) { 931 /*
250#ifdef CP1DBG 932 * Since microMIPS FPU instructios are a subset of MIPS32 FPU
251 printk("failed to emulate branch at %p\n", 933 * instructions, we want to convert microMIPS FPU instructions
252 (void *) (xcp->cp0_epc)); 934 * into MIPS32 instructions so that we could reuse all of the
253#endif 935 * FPU emulation code.
936 *
937 * NOTE: We cannot do this for branch instructions since they
938 * are not a subset. Example: Cannot emulate a 16-bit
939 * aligned target address with a MIPS32 instruction.
940 */
941 if (dec_insn.micro_mips_mode) {
942 /*
943 * If next instruction is a 16-bit instruction, then it
944 * it cannot be a FPU instruction. This could happen
945 * since we can be called for non-FPU instructions.
946 */
947 if ((pc_inc == 2) ||
948 (microMIPS32_to_MIPS32((union mips_instruction *)&ir)
949 == SIGILL))
254 return SIGILL; 950 return SIGILL;
255 }
256 if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
257 MIPS_FPU_EMU_INC_STATS(errors);
258 *fault_addr = (mips_instruction __user *)emulpc;
259 return SIGBUS;
260 }
261 if (__get_user(ir, (mips_instruction __user *) emulpc)) {
262 MIPS_FPU_EMU_INC_STATS(errors);
263 *fault_addr = (mips_instruction __user *)emulpc;
264 return SIGSEGV;
265 }
266 /* __compute_return_epc() will have updated cp0_epc */
267 contpc = xcp->cp0_epc;
268 /* In order not to confuse ptrace() et al, tweak context */
269 xcp->cp0_epc = emulpc - 4;
270 } else {
271 emulpc = xcp->cp0_epc;
272 contpc = xcp->cp0_epc + 4;
273 } 951 }
274 952
275 emul: 953 emul:
@@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
474 /* branch taken: emulate dslot 1152 /* branch taken: emulate dslot
475 * instruction 1153 * instruction
476 */ 1154 */
477 xcp->cp0_epc += 4; 1155 xcp->cp0_epc += dec_insn.pc_inc;
478 contpc = (xcp->cp0_epc + 1156
479 (MIPSInst_SIMM(ir) << 2)); 1157 contpc = MIPSInst_SIMM(ir);
480 1158 ir = dec_insn.next_insn;
481 if (!access_ok(VERIFY_READ, xcp->cp0_epc, 1159 if (dec_insn.micro_mips_mode) {
482 sizeof(mips_instruction))) { 1160 contpc = (xcp->cp0_epc + (contpc << 1));
483 MIPS_FPU_EMU_INC_STATS(errors); 1161
484 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1162 /* If 16-bit instruction, not FPU. */
485 return SIGBUS; 1163 if ((dec_insn.next_pc_inc == 2) ||
486 } 1164 (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
487 if (__get_user(ir, 1165
488 (mips_instruction __user *) xcp->cp0_epc)) { 1166 /*
489 MIPS_FPU_EMU_INC_STATS(errors); 1167 * Since this instruction will
490 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 1168 * be put on the stack with
491 return SIGSEGV; 1169 * 32-bit words, get around
492 } 1170 * this problem by putting a
1171 * NOP16 as the second one.
1172 */
1173 if (dec_insn.next_pc_inc == 2)
1174 ir = (ir & (~0xffff)) | MM_NOP16;
1175
1176 /*
1177 * Single step the non-CP1
1178 * instruction in the dslot.
1179 */
1180 return mips_dsemul(xcp, ir, contpc);
1181 }
1182 } else
1183 contpc = (xcp->cp0_epc + (contpc << 2));
493 1184
494 switch (MIPSInst_OPCODE(ir)) { 1185 switch (MIPSInst_OPCODE(ir)) {
495 case lwc1_op: 1186 case lwc1_op:
@@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
525 * branch likely nullifies 1216 * branch likely nullifies
526 * dslot if not taken 1217 * dslot if not taken
527 */ 1218 */
528 xcp->cp0_epc += 4; 1219 xcp->cp0_epc += dec_insn.pc_inc;
529 contpc += 4; 1220 contpc += dec_insn.pc_inc;
530 /* 1221 /*
531 * else continue & execute 1222 * else continue & execute
532 * dslot as normal insn 1223 * dslot as normal insn
@@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1313 int has_fpu, void *__user *fault_addr) 2004 int has_fpu, void *__user *fault_addr)
1314{ 2005{
1315 unsigned long oldepc, prevepc; 2006 unsigned long oldepc, prevepc;
1316 mips_instruction insn; 2007 struct mm_decoded_insn dec_insn;
2008 u16 instr[4];
2009 u16 *instr_ptr;
1317 int sig = 0; 2010 int sig = 0;
1318 2011
1319 oldepc = xcp->cp0_epc; 2012 oldepc = xcp->cp0_epc;
1320 do { 2013 do {
1321 prevepc = xcp->cp0_epc; 2014 prevepc = xcp->cp0_epc;
1322 2015
1323 if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { 2016 if (get_isa16_mode(prevepc) && cpu_has_mmips) {
1324 MIPS_FPU_EMU_INC_STATS(errors); 2017 /*
1325 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 2018 * Get next 2 microMIPS instructions and convert them
1326 return SIGBUS; 2019 * into 32-bit instructions.
1327 } 2020 */
1328 if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { 2021 if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
1329 MIPS_FPU_EMU_INC_STATS(errors); 2022 (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
1330 *fault_addr = (mips_instruction __user *)xcp->cp0_epc; 2023 (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
1331 return SIGSEGV; 2024 (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
2025 MIPS_FPU_EMU_INC_STATS(errors);
2026 return SIGBUS;
2027 }
2028 instr_ptr = instr;
2029
2030 /* Get first instruction. */
2031 if (mm_insn_16bit(*instr_ptr)) {
2032 /* Duplicate the half-word. */
2033 dec_insn.insn = (*instr_ptr << 16) |
2034 (*instr_ptr);
2035 /* 16-bit instruction. */
2036 dec_insn.pc_inc = 2;
2037 instr_ptr += 1;
2038 } else {
2039 dec_insn.insn = (*instr_ptr << 16) |
2040 *(instr_ptr+1);
2041 /* 32-bit instruction. */
2042 dec_insn.pc_inc = 4;
2043 instr_ptr += 2;
2044 }
2045 /* Get second instruction. */
2046 if (mm_insn_16bit(*instr_ptr)) {
2047 /* Duplicate the half-word. */
2048 dec_insn.next_insn = (*instr_ptr << 16) |
2049 (*instr_ptr);
2050 /* 16-bit instruction. */
2051 dec_insn.next_pc_inc = 2;
2052 } else {
2053 dec_insn.next_insn = (*instr_ptr << 16) |
2054 *(instr_ptr+1);
2055 /* 32-bit instruction. */
2056 dec_insn.next_pc_inc = 4;
2057 }
2058 dec_insn.micro_mips_mode = 1;
2059 } else {
2060 if ((get_user(dec_insn.insn,
2061 (mips_instruction __user *) xcp->cp0_epc)) ||
2062 (get_user(dec_insn.next_insn,
2063 (mips_instruction __user *)(xcp->cp0_epc+4)))) {
2064 MIPS_FPU_EMU_INC_STATS(errors);
2065 return SIGBUS;
2066 }
2067 dec_insn.pc_inc = 4;
2068 dec_insn.next_pc_inc = 4;
2069 dec_insn.micro_mips_mode = 0;
1332 } 2070 }
1333 if (insn == 0) 2071
1334 xcp->cp0_epc += 4; /* skip nops */ 2072 if ((dec_insn.insn == 0) ||
2073 ((dec_insn.pc_inc == 2) &&
2074 ((dec_insn.insn & 0xffff) == MM_NOP16)))
2075 xcp->cp0_epc += dec_insn.pc_inc; /* Skip NOPs */
1335 else { 2076 else {
1336 /* 2077 /*
1337 * The 'ieee754_csr' is an alias of 2078 * The 'ieee754_csr' is an alias of
@@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1341 */ 2082 */
1342 /* convert to ieee library modes */ 2083 /* convert to ieee library modes */
1343 ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; 2084 ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
1344 sig = cop1Emulate(xcp, ctx, fault_addr); 2085 sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
1345 /* revert to mips rounding mode */ 2086 /* revert to mips rounding mode */
1346 ieee754_csr.rm = mips_rm[ieee754_csr.rm]; 2087 ieee754_csr.rm = mips_rm[ieee754_csr.rm];
1347 } 2088 }
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 384a3b0091ea..7ea622ab8dad 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
55 struct emuframe __user *fr; 55 struct emuframe __user *fr;
56 int err; 56 int err;
57 57
58 if (ir == 0) { /* a nop is easy */ 58 if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
59 (ir == 0)) {
60 /* NOP is easy */
59 regs->cp0_epc = cpc; 61 regs->cp0_epc = cpc;
60 regs->cp0_cause &= ~CAUSEF_BD; 62 regs->cp0_cause &= ~CAUSEF_BD;
61 return 0; 63 return 0;
@@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
91 if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) 93 if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
92 return SIGBUS; 94 return SIGBUS;
93 95
94 err = __put_user(ir, &fr->emul); 96 if (get_isa16_mode(regs->cp0_epc)) {
95 err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst); 97 err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
98 err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
99 err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
100 err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
101 } else {
102 err = __put_user(ir, &fr->emul);
103 err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
104 }
105
96 err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie); 106 err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
97 err |= __put_user(cpc, &fr->epc); 107 err |= __put_user(cpc, &fr->epc);
98 108
@@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
101 return SIGBUS; 111 return SIGBUS;
102 } 112 }
103 113
104 regs->cp0_epc = (unsigned long) &fr->emul; 114 regs->cp0_epc = ((unsigned long) &fr->emul) |
115 get_isa16_mode(regs->cp0_epc);
105 116
106 flush_cache_sigtramp((unsigned long)&fr->badinst); 117 flush_cache_sigtramp((unsigned long)&fr->badinst);
107 118
@@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp)
114 unsigned long epc; 125 unsigned long epc;
115 u32 insn, cookie; 126 u32 insn, cookie;
116 int err = 0; 127 int err = 0;
128 u16 instr[2];
117 129
118 fr = (struct emuframe __user *) 130 fr = (struct emuframe __user *)
119 (xcp->cp0_epc - sizeof(mips_instruction)); 131 (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
120 132
121 /* 133 /*
122 * If we can't even access the area, something is very wrong, but we'll 134 * If we can't even access the area, something is very wrong, but we'll
@@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp)
131 * - Is the instruction pointed to by the EPC an BREAK_MATH? 143 * - Is the instruction pointed to by the EPC an BREAK_MATH?
132 * - Is the following memory word the BD_COOKIE? 144 * - Is the following memory word the BD_COOKIE?
133 */ 145 */
134 err = __get_user(insn, &fr->badinst); 146 if (get_isa16_mode(xcp->cp0_epc)) {
147 err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
148 err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
149 insn = (instr[0] << 16) | instr[1];
150 } else {
151 err = __get_user(insn, &fr->badinst);
152 }
135 err |= __get_user(cookie, &fr->cookie); 153 err |= __get_user(cookie, &fr->cookie);
136 154
137 if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) { 155 if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 1dcec30ad1c4..e87aae1f2e80 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y += cache.o dma-default.o extable.o fault.o \ 5obj-y += cache.o dma-default.o extable.o fault.o \
6 gup.o init.o mmap.o page.o page-funcs.o \ 6 gup.o init.o mmap.o page.o page-funcs.o \
7 tlbex.o tlbex-fault.o uasm.o 7 tlbex.o tlbex-fault.o uasm-mips.o
8 8
9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o 9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
10obj-$(CONFIG_64BIT) += pgtable-64.o 10obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
22obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o 22obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
23obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o 23obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
24obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o 24obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
25
26obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2078915eacb9..21813beec7a5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -33,6 +33,7 @@
33#include <asm/war.h> 33#include <asm/war.h>
34#include <asm/cacheflush.h> /* for run_uncached() */ 34#include <asm/cacheflush.h> /* for run_uncached() */
35#include <asm/traps.h> 35#include <asm/traps.h>
36#include <asm/dma-coherence.h>
36 37
37/* 38/*
38 * Special Variant of smp_call_function for use by cache functions: 39 * Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
136 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; 137 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
137} 138}
138 139
139static void (* r4k_blast_dcache)(void); 140void (* r4k_blast_dcache)(void);
141EXPORT_SYMBOL(r4k_blast_dcache);
140 142
141static void __cpuinit r4k_blast_dcache_setup(void) 143static void __cpuinit r4k_blast_dcache_setup(void)
142{ 144{
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
264 r4k_blast_icache_page_indexed = blast_icache64_page_indexed; 266 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
265} 267}
266 268
267static void (* r4k_blast_icache)(void); 269void (* r4k_blast_icache)(void);
270EXPORT_SYMBOL(r4k_blast_icache);
268 271
269static void __cpuinit r4k_blast_icache_setup(void) 272static void __cpuinit r4k_blast_icache_setup(void)
270{ 273{
@@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
1377 } 1380 }
1378} 1381}
1379 1382
1380#if defined(CONFIG_DMA_NONCOHERENT)
1381
1382static int __cpuinitdata coherentio;
1383
1384static int __init setcoherentio(char *str)
1385{
1386 coherentio = 1;
1387
1388 return 0;
1389}
1390
1391early_param("coherentio", setcoherentio);
1392#endif
1393
1394static void __cpuinit r4k_cache_error_setup(void) 1383static void __cpuinit r4k_cache_error_setup(void)
1395{ 1384{
1396 extern char __weak except_vec2_generic; 1385 extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
1472 1461
1473 build_clear_page(); 1462 build_clear_page();
1474 build_copy_page(); 1463 build_copy_page();
1475#if !defined(CONFIG_MIPS_CMP) 1464
1465 /*
1466 * We want to run CMP kernels on core with and without coherent
1467 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1468 * or not to flush caches.
1469 */
1476 local_r4k___flush_cache_all(NULL); 1470 local_r4k___flush_cache_all(NULL);
1477#endif 1471
1478 coherency_setup(); 1472 coherency_setup();
1479 board_cache_error_setup = r4k_cache_error_setup; 1473 board_cache_error_setup = r4k_cache_error_setup;
1480} 1474}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 07cec4407b0c..5aeb3eb0b72f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
48 48
49EXPORT_SYMBOL_GPL(local_flush_data_cache_page); 49EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
50EXPORT_SYMBOL(flush_data_cache_page); 50EXPORT_SYMBOL(flush_data_cache_page);
51EXPORT_SYMBOL(flush_icache_all);
51 52
52#ifdef CONFIG_DMA_NONCOHERENT 53#ifdef CONFIG_DMA_NONCOHERENT
53 54
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f9ef83829a52..caf92ecb37d6 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -22,6 +22,26 @@
22 22
23#include <dma-coherence.h> 23#include <dma-coherence.h>
24 24
25int coherentio = 0; /* User defined DMA coherency from command line. */
26EXPORT_SYMBOL_GPL(coherentio);
27int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
28
29static int __init setcoherentio(char *str)
30{
31 coherentio = 1;
32 pr_info("Hardware DMA cache coherency (command line)\n");
33 return 0;
34}
35early_param("coherentio", setcoherentio);
36
37static int __init setnocoherentio(char *str)
38{
39 coherentio = 0;
40 pr_info("Software DMA cache coherency (command line)\n");
41 return 0;
42}
43early_param("nocoherentio", setnocoherentio);
44
25static inline struct page *dma_addr_to_page(struct device *dev, 45static inline struct page *dma_addr_to_page(struct device *dev,
26 dma_addr_t dma_addr) 46 dma_addr_t dma_addr)
27{ 47{
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
115 135
116 if (!plat_device_is_coherent(dev)) { 136 if (!plat_device_is_coherent(dev)) {
117 dma_cache_wback_inv((unsigned long) ret, size); 137 dma_cache_wback_inv((unsigned long) ret, size);
118 ret = UNCAC_ADDR(ret); 138 if (!hw_coherentio)
139 ret = UNCAC_ADDR(ret);
119 } 140 }
120 } 141 }
121 142
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
142 163
143 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); 164 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
144 165
145 if (!plat_device_is_coherent(dev)) 166 if (!plat_device_is_coherent(dev) && !hw_coherentio)
146 addr = CAC_ADDR(addr); 167 addr = CAC_ADDR(addr);
147 168
148 free_pages(addr, get_order(size)); 169 free_pages(addr, get_order(size));
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index a29fba55b53e..4eb8dcfaf1ce 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
247 struct uasm_label *l = labels; 247 struct uasm_label *l = labels;
248 struct uasm_reloc *r = relocs; 248 struct uasm_reloc *r = relocs;
249 int i; 249 int i;
250 static atomic_t run_once = ATOMIC_INIT(0);
251
252 if (atomic_xchg(&run_once, 1)) {
253 return;
254 }
250 255
251 memset(labels, 0, sizeof(labels)); 256 memset(labels, 0, sizeof(labels));
252 memset(relocs, 0, sizeof(relocs)); 257 memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
389 struct uasm_label *l = labels; 394 struct uasm_label *l = labels;
390 struct uasm_reloc *r = relocs; 395 struct uasm_reloc *r = relocs;
391 int i; 396 int i;
397 static atomic_t run_once = ATOMIC_INIT(0);
398
399 if (atomic_xchg(&run_once, 1)) {
400 return;
401 }
392 402
393 memset(labels, 0, sizeof(labels)); 403 memset(labels, 0, sizeof(labels));
394 memset(relocs, 0, sizeof(relocs)); 404 memset(relocs, 0, sizeof(relocs));
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..4a13c150f31b 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
51#endif 51#endif
52 52
53 local_irq_save(flags); 53 local_irq_save(flags);
54 old_ctx = read_c0_entryhi() & ASID_MASK; 54 old_ctx = ASID_MASK(read_c0_entryhi());
55 write_c0_entrylo0(0); 55 write_c0_entrylo0(0);
56 entry = r3k_have_wired_reg ? read_c0_wired() : 8; 56 entry = r3k_have_wired_reg ? read_c0_wired() : 8;
57 for (; entry < current_cpu_data.tlbsize; entry++) { 57 for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
87 87
88#ifdef DEBUG_TLB 88#ifdef DEBUG_TLB
89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", 89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
90 cpu_context(cpu, mm) & ASID_MASK, start, end); 90 ASID_MASK(cpu_context(cpu, mm)), start, end);
91#endif 91#endif
92 local_irq_save(flags); 92 local_irq_save(flags);
93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
94 if (size <= current_cpu_data.tlbsize) { 94 if (size <= current_cpu_data.tlbsize) {
95 int oldpid = read_c0_entryhi() & ASID_MASK; 95 int oldpid = ASID_MASK(read_c0_entryhi());
96 int newpid = cpu_context(cpu, mm) & ASID_MASK; 96 int newpid = ASID_MASK(cpu_context(cpu, mm));
97 97
98 start &= PAGE_MASK; 98 start &= PAGE_MASK;
99 end += PAGE_SIZE - 1; 99 end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
166#ifdef DEBUG_TLB 166#ifdef DEBUG_TLB
167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); 167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
168#endif 168#endif
169 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; 169 newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
170 page &= PAGE_MASK; 170 page &= PAGE_MASK;
171 local_irq_save(flags); 171 local_irq_save(flags);
172 oldpid = read_c0_entryhi() & ASID_MASK; 172 oldpid = ASID_MASK(read_c0_entryhi());
173 write_c0_entryhi(page | newpid); 173 write_c0_entryhi(page | newpid);
174 BARRIER; 174 BARRIER;
175 tlb_probe(); 175 tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
197 if (current->active_mm != vma->vm_mm) 197 if (current->active_mm != vma->vm_mm)
198 return; 198 return;
199 199
200 pid = read_c0_entryhi() & ASID_MASK; 200 pid = ASID_MASK(read_c0_entryhi());
201 201
202#ifdef DEBUG_TLB 202#ifdef DEBUG_TLB
203 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { 203 if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", 204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
205 (cpu_context(cpu, vma->vm_mm)), pid); 205 (cpu_context(cpu, vma->vm_mm)), pid);
206 } 206 }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
241 241
242 local_irq_save(flags); 242 local_irq_save(flags);
243 /* Save old context and create impossible VPN2 value */ 243 /* Save old context and create impossible VPN2 value */
244 old_ctx = read_c0_entryhi() & ASID_MASK; 244 old_ctx = ASID_MASK(read_c0_entryhi());
245 old_pagemask = read_c0_pagemask(); 245 old_pagemask = read_c0_pagemask();
246 w = read_c0_wired(); 246 w = read_c0_wired();
247 write_c0_wired(w + 1); 247 write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
264#endif 264#endif
265 265
266 local_irq_save(flags); 266 local_irq_save(flags);
267 old_ctx = read_c0_entryhi() & ASID_MASK; 267 old_ctx = ASID_MASK(read_c0_entryhi());
268 write_c0_entrylo0(entrylo0); 268 write_c0_entrylo0(entrylo0);
269 write_c0_entryhi(entryhi); 269 write_c0_entryhi(entryhi);
270 write_c0_index(wired); 270 write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 493131c81a29..09653b290d53 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/hugetlb.h> 15#include <linux/hugetlb.h>
16#include <linux/module.h>
16 17
17#include <asm/cpu.h> 18#include <asm/cpu.h>
18#include <asm/bootinfo.h> 19#include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
94 FLUSH_ITLB; 95 FLUSH_ITLB;
95 EXIT_CRITICAL(flags); 96 EXIT_CRITICAL(flags);
96} 97}
98EXPORT_SYMBOL(local_flush_tlb_all);
97 99
98/* All entries common to a mm share an asid. To effectively flush 100/* All entries common to a mm share an asid. To effectively flush
99 these entries, we just bump the asid. */ 101 these entries, we just bump the asid. */
@@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
285 287
286 ENTER_CRITICAL(flags); 288 ENTER_CRITICAL(flags);
287 289
288 pid = read_c0_entryhi() & ASID_MASK; 290 pid = ASID_MASK(read_c0_entryhi());
289 address &= (PAGE_MASK << 1); 291 address &= (PAGE_MASK << 1);
290 write_c0_entryhi(address | pid); 292 write_c0_entryhi(address | pid);
291 pgdp = pgd_offset(vma->vm_mm, address); 293 pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..122f9207f49e 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
195 if (current->active_mm != vma->vm_mm) 195 if (current->active_mm != vma->vm_mm)
196 return; 196 return;
197 197
198 pid = read_c0_entryhi() & ASID_MASK; 198 pid = ASID_MASK(read_c0_entryhi());
199 199
200 local_irq_save(flags); 200 local_irq_save(flags);
201 address &= PAGE_MASK; 201 address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 820e6612d744..4d46d3787576 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/cache.h> 30#include <linux/cache.h>
31 31
32#include <asm/mmu_context.h>
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
34#include <asm/war.h> 35#include <asm/war.h>
@@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
305static int check_for_high_segbits __cpuinitdata; 306static int check_for_high_segbits __cpuinitdata;
306#endif 307#endif
307 308
309static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
310 unsigned int i_const)
311{
312 unsigned int **p;
313
314 for (p = start; p < stop; p++) {
315#ifndef CONFIG_CPU_MICROMIPS
316 unsigned int *ip;
317
318 ip = *p;
319 *ip = (*ip & 0xffff0000) | i_const;
320#else
321 unsigned short *ip;
322
323 ip = ((unsigned short *)((unsigned int)*p - 1));
324 if ((*ip & 0xf000) == 0x4000) {
325 *ip &= 0xfff1;
326 *ip |= (i_const << 1);
327 } else if ((*ip & 0xf000) == 0x6000) {
328 *ip &= 0xfff1;
329 *ip |= ((i_const >> 2) << 1);
330 } else {
331 ip++;
332 *ip = i_const;
333 }
334#endif
335 local_flush_icache_range((unsigned long)ip,
336 (unsigned long)ip + sizeof(*ip));
337 }
338}
339
340#define asid_insn_fixup(section, const) \
341do { \
342 extern unsigned int *__start_ ## section; \
343 extern unsigned int *__stop_ ## section; \
344 insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
345} while(0)
346
347/*
348 * Caller is assumed to flush the caches before the first context switch.
349 */
350static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
351 unsigned int version_mask,
352 unsigned int first_version)
353{
354 extern asmlinkage void handle_ri_rdhwr_vivt(void);
355 unsigned long *vivt_exc;
356
357#ifdef CONFIG_CPU_MICROMIPS
358 /*
359 * Worst case optimised microMIPS addiu instructions support
360 * only a 3-bit immediate value.
361 */
362 if(inc > 7)
363 panic("Invalid ASID increment value!");
364#endif
365 asid_insn_fixup(__asid_inc, inc);
366 asid_insn_fixup(__asid_mask, mask);
367 asid_insn_fixup(__asid_version_mask, version_mask);
368 asid_insn_fixup(__asid_first_version, first_version);
369
370 /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
371 vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
372#ifdef CONFIG_CPU_MICROMIPS
373 vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
374#endif
375 vivt_exc++;
376 *vivt_exc = (*vivt_exc & ~mask) | mask;
377
378 current_cpu_data.asid_cache = first_version;
379}
380
308static int check_for_high_segbits __cpuinitdata; 381static int check_for_high_segbits __cpuinitdata;
309 382
310static unsigned int kscratch_used_mask __cpuinitdata; 383static unsigned int kscratch_used_mask __cpuinitdata;
@@ -1458,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
1458u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; 1531u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
1459u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; 1532u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
1460#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1533#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1461u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; 1534u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
1462 1535
1463static void __cpuinit build_r4000_setup_pgd(void) 1536static void __cpuinit build_r4000_setup_pgd(void)
1464{ 1537{
1465 const int a0 = 4; 1538 const int a0 = 4;
1466 const int a1 = 5; 1539 const int a1 = 5;
1467 u32 *p = tlbmiss_handler_setup_pgd; 1540 u32 *p = tlbmiss_handler_setup_pgd_array;
1468 struct uasm_label *l = labels; 1541 struct uasm_label *l = labels;
1469 struct uasm_reloc *r = relocs; 1542 struct uasm_reloc *r = relocs;
1470 1543
1471 memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); 1544 memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
1472 memset(labels, 0, sizeof(labels)); 1545 memset(labels, 0, sizeof(labels));
1473 memset(relocs, 0, sizeof(relocs)); 1546 memset(relocs, 0, sizeof(relocs));
1474 1547
@@ -1496,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
1496 uasm_i_jr(&p, 31); 1569 uasm_i_jr(&p, 31);
1497 UASM_i_MTC0(&p, a0, 31, pgd_reg); 1570 UASM_i_MTC0(&p, a0, 31, pgd_reg);
1498 } 1571 }
1499 if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) 1572 if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
1500 panic("tlbmiss_handler_setup_pgd space exceeded"); 1573 panic("tlbmiss_handler_setup_pgd_array space exceeded");
1501 uasm_resolve_relocs(relocs, labels); 1574 uasm_resolve_relocs(relocs, labels);
1502 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", 1575 pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
1503 (unsigned int)(p - tlbmiss_handler_setup_pgd)); 1576 (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
1504 1577
1505 dump_handler("tlbmiss_handler", 1578 dump_handler("tlbmiss_handler",
1506 tlbmiss_handler_setup_pgd, 1579 tlbmiss_handler_setup_pgd_array,
1507 ARRAY_SIZE(tlbmiss_handler_setup_pgd)); 1580 ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
1508} 1581}
1509#endif 1582#endif
1510 1583
@@ -2030,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
2030 2103
2031 uasm_l_nopage_tlbl(&l, p); 2104 uasm_l_nopage_tlbl(&l, p);
2032 build_restore_work_registers(&p); 2105 build_restore_work_registers(&p);
2106#ifdef CONFIG_CPU_MICROMIPS
2107 if ((unsigned long)tlb_do_page_fault_0 & 1) {
2108 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2109 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2110 uasm_i_jr(&p, K0);
2111 } else
2112#endif
2033 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); 2113 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2034 uasm_i_nop(&p); 2114 uasm_i_nop(&p);
2035 2115
@@ -2077,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
2077 2157
2078 uasm_l_nopage_tlbs(&l, p); 2158 uasm_l_nopage_tlbs(&l, p);
2079 build_restore_work_registers(&p); 2159 build_restore_work_registers(&p);
2160#ifdef CONFIG_CPU_MICROMIPS
2161 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2162 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2163 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2164 uasm_i_jr(&p, K0);
2165 } else
2166#endif
2080 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2167 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2081 uasm_i_nop(&p); 2168 uasm_i_nop(&p);
2082 2169
@@ -2125,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
2125 2212
2126 uasm_l_nopage_tlbm(&l, p); 2213 uasm_l_nopage_tlbm(&l, p);
2127 build_restore_work_registers(&p); 2214 build_restore_work_registers(&p);
2215#ifdef CONFIG_CPU_MICROMIPS
2216 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2217 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2218 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2219 uasm_i_jr(&p, K0);
2220 } else
2221#endif
2128 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 2222 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2129 uasm_i_nop(&p); 2223 uasm_i_nop(&p);
2130 2224
@@ -2162,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void)
2162 case CPU_TX3922: 2256 case CPU_TX3922:
2163 case CPU_TX3927: 2257 case CPU_TX3927:
2164#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 2258#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2165 build_r3000_tlb_refill_handler(); 2259 setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
2260 if (cpu_has_local_ebase)
2261 build_r3000_tlb_refill_handler();
2166 if (!run_once) { 2262 if (!run_once) {
2263 if (!cpu_has_local_ebase)
2264 build_r3000_tlb_refill_handler();
2167 build_r3000_tlb_load_handler(); 2265 build_r3000_tlb_load_handler();
2168 build_r3000_tlb_store_handler(); 2266 build_r3000_tlb_store_handler();
2169 build_r3000_tlb_modify_handler(); 2267 build_r3000_tlb_modify_handler();
@@ -2184,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void)
2184 break; 2282 break;
2185 2283
2186 default: 2284 default:
2285#ifndef CONFIG_MIPS_MT_SMTC
2286 setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
2287#else
2288 setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
2289#endif
2187 if (!run_once) { 2290 if (!run_once) {
2188 scratch_reg = allocate_kscratch(); 2291 scratch_reg = allocate_kscratch();
2189#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2292#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2192,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void)
2192 build_r4000_tlb_load_handler(); 2295 build_r4000_tlb_load_handler();
2193 build_r4000_tlb_store_handler(); 2296 build_r4000_tlb_store_handler();
2194 build_r4000_tlb_modify_handler(); 2297 build_r4000_tlb_modify_handler();
2298 if (!cpu_has_local_ebase)
2299 build_r4000_tlb_refill_handler();
2195 run_once++; 2300 run_once++;
2196 } 2301 }
2197 build_r4000_tlb_refill_handler(); 2302 if (cpu_has_local_ebase)
2303 build_r4000_tlb_refill_handler();
2198 } 2304 }
2199} 2305}
2200 2306
@@ -2207,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void)
2207 local_flush_icache_range((unsigned long)handle_tlbm, 2313 local_flush_icache_range((unsigned long)handle_tlbm,
2208 (unsigned long)handle_tlbm + sizeof(handle_tlbm)); 2314 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
2209#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2315#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2210 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, 2316 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
2211 (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm)); 2317 (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
2212#endif 2318#endif
2213} 2319}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644
index 000000000000..162ee6d62788
--- /dev/null
+++ b/arch/mips/mm/uasm-micromips.c
@@ -0,0 +1,221 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * A small micro-assembler. It is intentionally kept simple, does only
7 * support a subset of instructions, and does not try to hide pipeline
8 * effects like branch delay slots.
9 *
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
14 */
15
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/init.h>
19
20#include <asm/inst.h>
21#include <asm/elf.h>
22#include <asm/bugs.h>
23#define UASM_ISA _UASM_ISA_MICROMIPS
24#include <asm/uasm.h>
25
26#define RS_MASK 0x1f
27#define RS_SH 16
28#define RT_MASK 0x1f
29#define RT_SH 21
30#define SCIMM_MASK 0x3ff
31#define SCIMM_SH 16
32
33/* This macro sets the non-variable bits of an instruction. */
34#define M(a, b, c, d, e, f) \
35 ((a) << OP_SH \
36 | (b) << RT_SH \
37 | (c) << RS_SH \
38 | (d) << RD_SH \
39 | (e) << RE_SH \
40 | (f) << FUNC_SH)
41
42/* Define these when we are not the ISA the kernel is being compiled with. */
43#ifndef CONFIG_CPU_MICROMIPS
44#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
45#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
46#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
47#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
48#endif
49
50#include "uasm.c"
51
52static struct insn insn_table_MM[] __uasminitdata = {
53 { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
54 { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
55 { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
56 { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
57 { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
58 { insn_beql, 0, 0 },
59 { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
60 { insn_bgezl, 0, 0 },
61 { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
62 { insn_bltzl, 0, 0 },
63 { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
64 { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
65 { insn_daddu, 0, 0 },
66 { insn_daddiu, 0, 0 },
67 { insn_dmfc0, 0, 0 },
68 { insn_dmtc0, 0, 0 },
69 { insn_dsll, 0, 0 },
70 { insn_dsll32, 0, 0 },
71 { insn_dsra, 0, 0 },
72 { insn_dsrl, 0, 0 },
73 { insn_dsrl32, 0, 0 },
74 { insn_drotr, 0, 0 },
75 { insn_drotr32, 0, 0 },
76 { insn_dsubu, 0, 0 },
77 { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
78 { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
79 { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
80 { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
81 { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
82 { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
83 { insn_ld, 0, 0 },
84 { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
85 { insn_lld, 0, 0 },
86 { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
87 { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
88 { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
89 { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
90 { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
91 { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
92 { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
93 { insn_rfe, 0, 0 },
94 { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
95 { insn_scd, 0, 0 },
96 { insn_sd, 0, 0 },
97 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
98 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
99 { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
100 { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
101 { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
102 { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
103 { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
104 { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
105 { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
106 { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
107 { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
108 { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
109 { insn_dins, 0, 0 },
110 { insn_dinsm, 0, 0 },
111 { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
112 { insn_bbit0, 0, 0 },
113 { insn_bbit1, 0, 0 },
114 { insn_lwx, 0, 0 },
115 { insn_ldx, 0, 0 },
116 { insn_invalid, 0, 0 }
117};
118
119#undef M
120
121static inline __uasminit u32 build_bimm(s32 arg)
122{
123 WARN(arg > 0xffff || arg < -0x10000,
124 KERN_WARNING "Micro-assembler field overflow\n");
125
126 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
127
128 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
129}
130
131static inline __uasminit u32 build_jimm(u32 arg)
132{
133
134 WARN(arg & ~((JIMM_MASK << 2) | 1),
135 KERN_WARNING "Micro-assembler field overflow\n");
136
137 return (arg >> 1) & JIMM_MASK;
138}
139
140/*
141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM.
143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
145{
146 struct insn *ip = NULL;
147 unsigned int i;
148 va_list ap;
149 u32 op;
150
151 for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
152 if (insn_table_MM[i].opcode == opc) {
153 ip = &insn_table_MM[i];
154 break;
155 }
156
157 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
158 panic("Unsupported Micro-assembler instruction %d", opc);
159
160 op = ip->match;
161 va_start(ap, opc);
162 if (ip->fields & RS) {
163 if (opc == insn_mfc0 || opc == insn_mtc0)
164 op |= build_rt(va_arg(ap, u32));
165 else
166 op |= build_rs(va_arg(ap, u32));
167 }
168 if (ip->fields & RT) {
169 if (opc == insn_mfc0 || opc == insn_mtc0)
170 op |= build_rs(va_arg(ap, u32));
171 else
172 op |= build_rt(va_arg(ap, u32));
173 }
174 if (ip->fields & RD)
175 op |= build_rd(va_arg(ap, u32));
176 if (ip->fields & RE)
177 op |= build_re(va_arg(ap, u32));
178 if (ip->fields & SIMM)
179 op |= build_simm(va_arg(ap, s32));
180 if (ip->fields & UIMM)
181 op |= build_uimm(va_arg(ap, u32));
182 if (ip->fields & BIMM)
183 op |= build_bimm(va_arg(ap, s32));
184 if (ip->fields & JIMM)
185 op |= build_jimm(va_arg(ap, u32));
186 if (ip->fields & FUNC)
187 op |= build_func(va_arg(ap, u32));
188 if (ip->fields & SET)
189 op |= build_set(va_arg(ap, u32));
190 if (ip->fields & SCIMM)
191 op |= build_scimm(va_arg(ap, u32));
192 va_end(ap);
193
194#ifdef CONFIG_CPU_LITTLE_ENDIAN
195 **buf = ((op & 0xffff) << 16) | (op >> 16);
196#else
197 **buf = op;
198#endif
199 (*buf)++;
200}
201
202static inline void __uasminit
203__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
204{
205 long laddr = (long)lab->addr;
206 long raddr = (long)rel->addr;
207
208 switch (rel->type) {
209 case R_MIPS_PC16:
210#ifdef CONFIG_CPU_LITTLE_ENDIAN
211 *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
212#else
213 *rel->addr |= build_bimm(laddr - (raddr + 4));
214#endif
215 break;
216
217 default:
218 panic("Unsupported Micro-assembler relocation %d",
219 rel->type);
220 }
221}
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644
index 000000000000..5fcdd8fe3e83
--- /dev/null
+++ b/arch/mips/mm/uasm-mips.c
@@ -0,0 +1,205 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * A small micro-assembler. It is intentionally kept simple, does only
7 * support a subset of instructions, and does not try to hide pipeline
8 * effects like branch delay slots.
9 *
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
14 */
15
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/init.h>
19
20#include <asm/inst.h>
21#include <asm/elf.h>
22#include <asm/bugs.h>
23#define UASM_ISA _UASM_ISA_CLASSIC
24#include <asm/uasm.h>
25
26#define RS_MASK 0x1f
27#define RS_SH 21
28#define RT_MASK 0x1f
29#define RT_SH 16
30#define SCIMM_MASK 0xfffff
31#define SCIMM_SH 6
32
33/* This macro sets the non-variable bits of an instruction. */
34#define M(a, b, c, d, e, f) \
35 ((a) << OP_SH \
36 | (b) << RS_SH \
37 | (c) << RT_SH \
38 | (d) << RD_SH \
39 | (e) << RE_SH \
40 | (f) << FUNC_SH)
41
42/* Define these when we are not the ISA the kernel is being compiled with. */
43#ifdef CONFIG_CPU_MICROMIPS
44#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
45#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
46#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
47#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
48#endif
49
50#include "uasm.c"
51
52static struct insn insn_table[] __uasminitdata = {
53 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
54 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
55 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
56 { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
57 { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
58 { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
59 { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
60 { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
61 { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
62 { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
63 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
64 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
65 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
66 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
67 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
68 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
69 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
70 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
71 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
72 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
73 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
74 { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
75 { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
76 { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
77 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
78 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
79 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
80 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
81 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
82 { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
83 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
84 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
86 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
87 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
88 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
89 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
90 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
92 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
93 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
94 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
95 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
96 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
97 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
98 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
99 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
100 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
101 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
102 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
103 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
105 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
106 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
107 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
108 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
109 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
110 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
111 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
112 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
113 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
114 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
115 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
116 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
117 { insn_invalid, 0, 0 }
118};
119
120#undef M
121
122static inline __uasminit u32 build_bimm(s32 arg)
123{
124 WARN(arg > 0x1ffff || arg < -0x20000,
125 KERN_WARNING "Micro-assembler field overflow\n");
126
127 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
128
129 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
130}
131
132static inline __uasminit u32 build_jimm(u32 arg)
133{
134 WARN(arg & ~(JIMM_MASK << 2),
135 KERN_WARNING "Micro-assembler field overflow\n");
136
137 return (arg >> 2) & JIMM_MASK;
138}
139
140/*
141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM.
143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
145{
146 struct insn *ip = NULL;
147 unsigned int i;
148 va_list ap;
149 u32 op;
150
151 for (i = 0; insn_table[i].opcode != insn_invalid; i++)
152 if (insn_table[i].opcode == opc) {
153 ip = &insn_table[i];
154 break;
155 }
156
157 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
158 panic("Unsupported Micro-assembler instruction %d", opc);
159
160 op = ip->match;
161 va_start(ap, opc);
162 if (ip->fields & RS)
163 op |= build_rs(va_arg(ap, u32));
164 if (ip->fields & RT)
165 op |= build_rt(va_arg(ap, u32));
166 if (ip->fields & RD)
167 op |= build_rd(va_arg(ap, u32));
168 if (ip->fields & RE)
169 op |= build_re(va_arg(ap, u32));
170 if (ip->fields & SIMM)
171 op |= build_simm(va_arg(ap, s32));
172 if (ip->fields & UIMM)
173 op |= build_uimm(va_arg(ap, u32));
174 if (ip->fields & BIMM)
175 op |= build_bimm(va_arg(ap, s32));
176 if (ip->fields & JIMM)
177 op |= build_jimm(va_arg(ap, u32));
178 if (ip->fields & FUNC)
179 op |= build_func(va_arg(ap, u32));
180 if (ip->fields & SET)
181 op |= build_set(va_arg(ap, u32));
182 if (ip->fields & SCIMM)
183 op |= build_scimm(va_arg(ap, u32));
184 va_end(ap);
185
186 **buf = op;
187 (*buf)++;
188}
189
190static inline void __uasminit
191__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
192{
193 long laddr = (long)lab->addr;
194 long raddr = (long)rel->addr;
195
196 switch (rel->type) {
197 case R_MIPS_PC16:
198 *rel->addr |= build_bimm(laddr - (raddr + 4));
199 break;
200
201 default:
202 panic("Unsupported Micro-assembler relocation %d",
203 rel->type);
204 }
205}
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 942ff6c2eba2..7eb5e4355d25 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -10,17 +10,9 @@
10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer 10 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
11 * Copyright (C) 2005, 2007 Maciej W. Rozycki 11 * Copyright (C) 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) 12 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
13 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
13 */ 14 */
14 15
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18
19#include <asm/inst.h>
20#include <asm/elf.h>
21#include <asm/bugs.h>
22#include <asm/uasm.h>
23
24enum fields { 16enum fields {
25 RS = 0x001, 17 RS = 0x001,
26 RT = 0x002, 18 RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
37 29
38#define OP_MASK 0x3f 30#define OP_MASK 0x3f
39#define OP_SH 26 31#define OP_SH 26
40#define RS_MASK 0x1f
41#define RS_SH 21
42#define RT_MASK 0x1f
43#define RT_SH 16
44#define RD_MASK 0x1f 32#define RD_MASK 0x1f
45#define RD_SH 11 33#define RD_SH 11
46#define RE_MASK 0x1f 34#define RE_MASK 0x1f
@@ -53,8 +41,6 @@ enum fields {
53#define FUNC_SH 0 41#define FUNC_SH 0
54#define SET_MASK 0x7 42#define SET_MASK 0x7
55#define SET_SH 0 43#define SET_SH 0
56#define SCIMM_MASK 0xfffff
57#define SCIMM_SH 6
58 44
59enum opcode { 45enum opcode {
60 insn_invalid, 46 insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
77 enum fields fields; 63 enum fields fields;
78}; 64};
79 65
80/* This macro sets the non-variable bits of an instruction. */
81#define M(a, b, c, d, e, f) \
82 ((a) << OP_SH \
83 | (b) << RS_SH \
84 | (c) << RT_SH \
85 | (d) << RD_SH \
86 | (e) << RE_SH \
87 | (f) << FUNC_SH)
88
89static struct insn insn_table[] __uasminitdata = {
90 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
92 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
93 { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
94 { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
95 { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
96 { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
97 { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
98 { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
99 { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
100 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
101 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
102 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
103 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
104 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
105 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
106 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
107 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
108 { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
109 { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
110 { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
111 { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
112 { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
113 { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
114 { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
115 { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
116 { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
117 { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
118 { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
119 { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
120 { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
121 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
122 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
123 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
124 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
125 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
126 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
127 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
128 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
129 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
130 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
131 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
132 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
133 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
134 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
135 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
136 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
137 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
138 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
139 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
140 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
141 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
142 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
143 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
144 { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
145 { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
146 { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
147 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
148 { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
149 { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
150 { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
151 { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
152 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
153 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
154 { insn_invalid, 0, 0 }
155};
156
157#undef M
158
159static inline __uasminit u32 build_rs(u32 arg) 66static inline __uasminit u32 build_rs(u32 arg)
160{ 67{
161 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 68 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
199 return arg & IMM_MASK; 106 return arg & IMM_MASK;
200} 107}
201 108
202static inline __uasminit u32 build_bimm(s32 arg)
203{
204 WARN(arg > 0x1ffff || arg < -0x20000,
205 KERN_WARNING "Micro-assembler field overflow\n");
206
207 WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
208
209 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
210}
211
212static inline __uasminit u32 build_jimm(u32 arg)
213{
214 WARN(arg & ~(JIMM_MASK << 2),
215 KERN_WARNING "Micro-assembler field overflow\n");
216
217 return (arg >> 2) & JIMM_MASK;
218}
219
220static inline __uasminit u32 build_scimm(u32 arg) 109static inline __uasminit u32 build_scimm(u32 arg)
221{ 110{
222 WARN(arg & ~SCIMM_MASK, 111 WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
239 return arg & SET_MASK; 128 return arg & SET_MASK;
240} 129}
241 130
242/* 131static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
243 * The order of opcode arguments is implicitly left to right,
244 * starting with RS and ending with FUNC or IMM.
245 */
246static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
247{
248 struct insn *ip = NULL;
249 unsigned int i;
250 va_list ap;
251 u32 op;
252
253 for (i = 0; insn_table[i].opcode != insn_invalid; i++)
254 if (insn_table[i].opcode == opc) {
255 ip = &insn_table[i];
256 break;
257 }
258
259 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
260 panic("Unsupported Micro-assembler instruction %d", opc);
261
262 op = ip->match;
263 va_start(ap, opc);
264 if (ip->fields & RS)
265 op |= build_rs(va_arg(ap, u32));
266 if (ip->fields & RT)
267 op |= build_rt(va_arg(ap, u32));
268 if (ip->fields & RD)
269 op |= build_rd(va_arg(ap, u32));
270 if (ip->fields & RE)
271 op |= build_re(va_arg(ap, u32));
272 if (ip->fields & SIMM)
273 op |= build_simm(va_arg(ap, s32));
274 if (ip->fields & UIMM)
275 op |= build_uimm(va_arg(ap, u32));
276 if (ip->fields & BIMM)
277 op |= build_bimm(va_arg(ap, s32));
278 if (ip->fields & JIMM)
279 op |= build_jimm(va_arg(ap, u32));
280 if (ip->fields & FUNC)
281 op |= build_func(va_arg(ap, u32));
282 if (ip->fields & SET)
283 op |= build_set(va_arg(ap, u32));
284 if (ip->fields & SCIMM)
285 op |= build_scimm(va_arg(ap, u32));
286 va_end(ap);
287
288 **buf = op;
289 (*buf)++;
290}
291 132
292#define I_u1u2u3(op) \ 133#define I_u1u2u3(op) \
293Ip_u1u2u3(op) \ 134Ip_u1u2u3(op) \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
445 286
446#ifdef CONFIG_CPU_CAVIUM_OCTEON 287#ifdef CONFIG_CPU_CAVIUM_OCTEON
447#include <asm/octeon/octeon.h> 288#include <asm/octeon/octeon.h>
448void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, 289void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
449 unsigned int c) 290 unsigned int c)
450{ 291{
451 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 292 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
457 else 298 else
458 build_insn(buf, insn_pref, c, a, b); 299 build_insn(buf, insn_pref, c, a, b);
459} 300}
460UASM_EXPORT_SYMBOL(uasm_i_pref); 301UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
461#else 302#else
462I_u2s3u1(_pref) 303I_u2s3u1(_pref)
463#endif 304#endif
464 305
465/* Handle labels. */ 306/* Handle labels. */
466void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) 307void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
467{ 308{
468 (*lab)->addr = addr; 309 (*lab)->addr = addr;
469 (*lab)->lab = lid; 310 (*lab)->lab = lid;
470 (*lab)++; 311 (*lab)++;
471} 312}
472UASM_EXPORT_SYMBOL(uasm_build_label); 313UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
473 314
474int __uasminit uasm_in_compat_space_p(long addr) 315int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
475{ 316{
476 /* Is this address in 32bit compat space? */ 317 /* Is this address in 32bit compat space? */
477#ifdef CONFIG_64BIT 318#ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
480 return 1; 321 return 1;
481#endif 322#endif
482} 323}
483UASM_EXPORT_SYMBOL(uasm_in_compat_space_p); 324UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
484 325
485static int __uasminit uasm_rel_highest(long val) 326static int __uasminit uasm_rel_highest(long val)
486{ 327{
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
500#endif 341#endif
501} 342}
502 343
503int __uasminit uasm_rel_hi(long val) 344int __uasminit ISAFUNC(uasm_rel_hi)(long val)
504{ 345{
505 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; 346 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
506} 347}
507UASM_EXPORT_SYMBOL(uasm_rel_hi); 348UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
508 349
509int __uasminit uasm_rel_lo(long val) 350int __uasminit ISAFUNC(uasm_rel_lo)(long val)
510{ 351{
511 return ((val & 0xffff) ^ 0x8000) - 0x8000; 352 return ((val & 0xffff) ^ 0x8000) - 0x8000;
512} 353}
513UASM_EXPORT_SYMBOL(uasm_rel_lo); 354UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
514 355
515void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) 356void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
516{ 357{
517 if (!uasm_in_compat_space_p(addr)) { 358 if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
518 uasm_i_lui(buf, rs, uasm_rel_highest(addr)); 359 ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
519 if (uasm_rel_higher(addr)) 360 if (uasm_rel_higher(addr))
520 uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); 361 ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
521 if (uasm_rel_hi(addr)) { 362 if (ISAFUNC(uasm_rel_hi(addr))) {
522 uasm_i_dsll(buf, rs, rs, 16); 363 ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
523 uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); 364 ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
524 uasm_i_dsll(buf, rs, rs, 16); 365 ISAFUNC(uasm_rel_hi)(addr));
366 ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
525 } else 367 } else
526 uasm_i_dsll32(buf, rs, rs, 0); 368 ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
527 } else 369 } else
528 uasm_i_lui(buf, rs, uasm_rel_hi(addr)); 370 ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
529} 371}
530UASM_EXPORT_SYMBOL(UASM_i_LA_mostly); 372UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
531 373
532void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr) 374void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
533{ 375{
534 UASM_i_LA_mostly(buf, rs, addr); 376 ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
535 if (uasm_rel_lo(addr)) { 377 if (ISAFUNC(uasm_rel_lo(addr))) {
536 if (!uasm_in_compat_space_p(addr)) 378 if (!ISAFUNC(uasm_in_compat_space_p)(addr))
537 uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); 379 ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
380 ISAFUNC(uasm_rel_lo(addr)));
538 else 381 else
539 uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); 382 ISAFUNC(uasm_i_addiu)(buf, rs, rs,
383 ISAFUNC(uasm_rel_lo(addr)));
540 } 384 }
541} 385}
542UASM_EXPORT_SYMBOL(UASM_i_LA); 386UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
543 387
544/* Handle relocations. */ 388/* Handle relocations. */
545void __uasminit 389void __uasminit
546uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) 390ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
547{ 391{
548 (*rel)->addr = addr; 392 (*rel)->addr = addr;
549 (*rel)->type = R_MIPS_PC16; 393 (*rel)->type = R_MIPS_PC16;
550 (*rel)->lab = lid; 394 (*rel)->lab = lid;
551 (*rel)++; 395 (*rel)++;
552} 396}
553UASM_EXPORT_SYMBOL(uasm_r_mips_pc16); 397UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
554 398
555static inline void __uasminit 399static inline void __uasminit
556__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 400__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
557{
558 long laddr = (long)lab->addr;
559 long raddr = (long)rel->addr;
560
561 switch (rel->type) {
562 case R_MIPS_PC16:
563 *rel->addr |= build_bimm(laddr - (raddr + 4));
564 break;
565
566 default:
567 panic("Unsupported Micro-assembler relocation %d",
568 rel->type);
569 }
570}
571 401
572void __uasminit 402void __uasminit
573uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 403ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
574{ 404{
575 struct uasm_label *l; 405 struct uasm_label *l;
576 406
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
579 if (rel->lab == l->lab) 409 if (rel->lab == l->lab)
580 __resolve_relocs(rel, l); 410 __resolve_relocs(rel, l);
581} 411}
582UASM_EXPORT_SYMBOL(uasm_resolve_relocs); 412UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
583 413
584void __uasminit 414void __uasminit
585uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) 415ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
586{ 416{
587 for (; rel->lab != UASM_LABEL_INVALID; rel++) 417 for (; rel->lab != UASM_LABEL_INVALID; rel++)
588 if (rel->addr >= first && rel->addr < end) 418 if (rel->addr >= first && rel->addr < end)
589 rel->addr += off; 419 rel->addr += off;
590} 420}
591UASM_EXPORT_SYMBOL(uasm_move_relocs); 421UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
592 422
593void __uasminit 423void __uasminit
594uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) 424ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
595{ 425{
596 for (; lab->lab != UASM_LABEL_INVALID; lab++) 426 for (; lab->lab != UASM_LABEL_INVALID; lab++)
597 if (lab->addr >= first && lab->addr < end) 427 if (lab->addr >= first && lab->addr < end)
598 lab->addr += off; 428 lab->addr += off;
599} 429}
600UASM_EXPORT_SYMBOL(uasm_move_labels); 430UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
601 431
602void __uasminit 432void __uasminit
603uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, 433ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
604 u32 *end, u32 *target) 434 u32 *end, u32 *target)
605{ 435{
606 long off = (long)(target - first); 436 long off = (long)(target - first);
607 437
608 memcpy(target, first, (end - first) * sizeof(u32)); 438 memcpy(target, first, (end - first) * sizeof(u32));
609 439
610 uasm_move_relocs(rel, first, end, off); 440 ISAFUNC(uasm_move_relocs(rel, first, end, off));
611 uasm_move_labels(lab, first, end, off); 441 ISAFUNC(uasm_move_labels(lab, first, end, off));
612} 442}
613UASM_EXPORT_SYMBOL(uasm_copy_handler); 443UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
614 444
615int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) 445int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
616{ 446{
617 for (; rel->lab != UASM_LABEL_INVALID; rel++) { 447 for (; rel->lab != UASM_LABEL_INVALID; rel++) {
618 if (rel->addr == addr 448 if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
623 453
624 return 0; 454 return 0;
625} 455}
626UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay); 456UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
627 457
628/* Convenience functions for labeled branches. */ 458/* Convenience functions for labeled branches. */
629void __uasminit 459void __uasminit
630uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 460ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
631{ 461{
632 uasm_r_mips_pc16(r, *p, lid); 462 uasm_r_mips_pc16(r, *p, lid);
633 uasm_i_bltz(p, reg, 0); 463 ISAFUNC(uasm_i_bltz)(p, reg, 0);
634} 464}
635UASM_EXPORT_SYMBOL(uasm_il_bltz); 465UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
636 466
637void __uasminit 467void __uasminit
638uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) 468ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
639{ 469{
640 uasm_r_mips_pc16(r, *p, lid); 470 uasm_r_mips_pc16(r, *p, lid);
641 uasm_i_b(p, 0); 471 ISAFUNC(uasm_i_b)(p, 0);
642} 472}
643UASM_EXPORT_SYMBOL(uasm_il_b); 473UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
644 474
645void __uasminit 475void __uasminit
646uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 476ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
647{ 477{
648 uasm_r_mips_pc16(r, *p, lid); 478 uasm_r_mips_pc16(r, *p, lid);
649 uasm_i_beqz(p, reg, 0); 479 ISAFUNC(uasm_i_beqz)(p, reg, 0);
650} 480}
651UASM_EXPORT_SYMBOL(uasm_il_beqz); 481UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
652 482
653void __uasminit 483void __uasminit
654uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 484ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
655{ 485{
656 uasm_r_mips_pc16(r, *p, lid); 486 uasm_r_mips_pc16(r, *p, lid);
657 uasm_i_beqzl(p, reg, 0); 487 ISAFUNC(uasm_i_beqzl)(p, reg, 0);
658} 488}
659UASM_EXPORT_SYMBOL(uasm_il_beqzl); 489UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
660 490
661void __uasminit 491void __uasminit
662uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, 492ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
663 unsigned int reg2, int lid) 493 unsigned int reg2, int lid)
664{ 494{
665 uasm_r_mips_pc16(r, *p, lid); 495 uasm_r_mips_pc16(r, *p, lid);
666 uasm_i_bne(p, reg1, reg2, 0); 496 ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
667} 497}
668UASM_EXPORT_SYMBOL(uasm_il_bne); 498UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
669 499
670void __uasminit 500void __uasminit
671uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 501ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
672{ 502{
673 uasm_r_mips_pc16(r, *p, lid); 503 uasm_r_mips_pc16(r, *p, lid);
674 uasm_i_bnez(p, reg, 0); 504 ISAFUNC(uasm_i_bnez)(p, reg, 0);
675} 505}
676UASM_EXPORT_SYMBOL(uasm_il_bnez); 506UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
677 507
678void __uasminit 508void __uasminit
679uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 509ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
680{ 510{
681 uasm_r_mips_pc16(r, *p, lid); 511 uasm_r_mips_pc16(r, *p, lid);
682 uasm_i_bgezl(p, reg, 0); 512 ISAFUNC(uasm_i_bgezl)(p, reg, 0);
683} 513}
684UASM_EXPORT_SYMBOL(uasm_il_bgezl); 514UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
685 515
686void __uasminit 516void __uasminit
687uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 517ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
688{ 518{
689 uasm_r_mips_pc16(r, *p, lid); 519 uasm_r_mips_pc16(r, *p, lid);
690 uasm_i_bgez(p, reg, 0); 520 ISAFUNC(uasm_i_bgez)(p, reg, 0);
691} 521}
692UASM_EXPORT_SYMBOL(uasm_il_bgez); 522UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
693 523
694void __uasminit 524void __uasminit
695uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, 525ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
696 unsigned int bit, int lid) 526 unsigned int bit, int lid)
697{ 527{
698 uasm_r_mips_pc16(r, *p, lid); 528 uasm_r_mips_pc16(r, *p, lid);
699 uasm_i_bbit0(p, reg, bit, 0); 529 ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
700} 530}
701UASM_EXPORT_SYMBOL(uasm_il_bbit0); 531UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
702 532
703void __uasminit 533void __uasminit
704uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, 534ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
705 unsigned int bit, int lid) 535 unsigned int bit, int lid)
706{ 536{
707 uasm_r_mips_pc16(r, *p, lid); 537 uasm_r_mips_pc16(r, *p, lid);
708 uasm_i_bbit1(p, reg, bit, 0); 538 ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
709} 539}
710UASM_EXPORT_SYMBOL(uasm_il_bbit1); 540UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 6079ef33b5f0..0388fc8b5613 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -5,9 +5,8 @@
5# Copyright (C) 2008 Wind River Systems, Inc. 5# Copyright (C) 2008 Wind River Systems, Inc.
6# written by Ralf Baechle <ralf@linux-mips.org> 6# written by Ralf Baechle <ralf@linux-mips.org>
7# 7#
8obj-y := malta-amon.o malta-cmdline.o \ 8obj-y := malta-amon.o malta-display.o malta-init.o \
9 malta-display.o malta-init.o malta-int.o \ 9 malta-int.o malta-memory.o malta-platform.o \
10 malta-memory.o malta-platform.o \
11 malta-reset.o malta-setup.o malta-time.o 10 malta-reset.o malta-setup.o malta-time.o
12 11
13obj-$(CONFIG_EARLY_PRINTK) += malta-console.o 12obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform
index 5b548b5a4fcf..2cc72c9b38e3 100644
--- a/arch/mips/mti-malta/Platform
+++ b/arch/mips/mti-malta/Platform
@@ -3,5 +3,9 @@
3# 3#
4platform-$(CONFIG_MIPS_MALTA) += mti-malta/ 4platform-$(CONFIG_MIPS_MALTA) += mti-malta/
5cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta 5cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta
6load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 6ifdef CONFIG_KVM_GUEST
7 load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000
8else
9 load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
10endif
7all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin 11all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
deleted file mode 100644
index 5576a306a145..000000000000
--- a/arch/mips/mti-malta/malta-cmdline.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Carsten Langgaard, carstenl@mips.com
3 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
4 *
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 *
18 * Kernel command line creation using the prom monitor (YAMON) argc/argv.
19 */
20#include <linux/init.h>
21#include <linux/string.h>
22
23#include <asm/bootinfo.h>
24
25extern int prom_argc;
26extern int *_prom_argv;
27
28/*
29 * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
30 * This macro take care of sign extension.
31 */
32#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
33
34char * __init prom_getcmdline(void)
35{
36 return &(arcs_cmdline[0]);
37}
38
39
40void __init prom_init_cmdline(void)
41{
42 char *cp;
43 int actr;
44
45 actr = 1; /* Always ignore argv[0] */
46
47 cp = &(arcs_cmdline[0]);
48 while(actr < prom_argc) {
49 strcpy(cp, prom_argv(actr));
50 cp += strlen(prom_argv(actr));
51 *cp++ = ' ';
52 actr++;
53 }
54 if (cp != &(arcs_cmdline[0])) {
55 /* get rid of trailing space */
56 --cp;
57 *cp = '\0';
58 }
59}
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
index 9bc58a24e80a..d4f807191ecd 100644
--- a/arch/mips/mti-malta/malta-display.c
+++ b/arch/mips/mti-malta/malta-display.c
@@ -1,28 +1,20 @@
1/* 1/*
2 * Carsten Langgaard, carstenl@mips.com 2 * This file is subject to the terms and conditions of the GNU General Public
3 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. 3 * License. See the file "COPYING" in the main directory of this archive
4 * 4 * for more details.
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 * 5 *
18 * Display routines for display messages in MIPS boards ascii display. 6 * Display routines for display messages in MIPS boards ascii display.
7 *
8 * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
9 * All rights reserved.
10 * Authors: Carsten Langgaard <carstenl@mips.com>
11 * Steven J. Hill <sjhill@mips.com>
19 */ 12 */
20
21#include <linux/compiler.h> 13#include <linux/compiler.h>
22#include <linux/timer.h> 14#include <linux/timer.h>
23#include <asm/io.h> 15#include <linux/io.h>
16
24#include <asm/mips-boards/generic.h> 17#include <asm/mips-boards/generic.h>
25#include <asm/mips-boards/prom.h>
26 18
27extern const char display_string[]; 19extern const char display_string[];
28static unsigned int display_count; 20static unsigned int display_count;
@@ -36,11 +28,11 @@ void mips_display_message(const char *str)
36 if (unlikely(display == NULL)) 28 if (unlikely(display == NULL))
37 display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int)); 29 display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
38 30
39 for (i = 0; i <= 14; i=i+2) { 31 for (i = 0; i <= 14; i += 2) {
40 if (*str) 32 if (*str)
41 __raw_writel(*str++, display + i); 33 __raw_writel(*str++, display + i);
42 else 34 else
43 __raw_writel(' ', display + i); 35 __raw_writel(' ', display + i);
44 } 36 }
45} 37}
46 38
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index c2cbce9e435e..ff8caffd3266 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -1,54 +1,28 @@
1/* 1/*
2 * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. 2 * This file is subject to the terms and conditions of the GNU General Public
3 * All rights reserved. 3 * License. See the file "COPYING" in the main directory of this archive
4 * Authors: Carsten Langgaard <carstenl@mips.com> 4 * for more details.
5 * Maciej W. Rozycki <macro@mips.com>
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 * 5 *
20 * PROM library initialisation code. 6 * PROM library initialisation code.
7 *
8 * Copyright (C) 1999,2000,2004,2005,2012 MIPS Technologies, Inc.
9 * All rights reserved.
10 * Authors: Carsten Langgaard <carstenl@mips.com>
11 * Maciej W. Rozycki <macro@mips.com>
12 * Steven J. Hill <sjhill@mips.com>
21 */ 13 */
22#include <linux/init.h> 14#include <linux/init.h>
23#include <linux/string.h> 15#include <linux/string.h>
24#include <linux/kernel.h> 16#include <linux/kernel.h>
25 17
26#include <asm/bootinfo.h>
27#include <asm/gt64120.h>
28#include <asm/io.h>
29#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
30#include <asm/smp-ops.h> 19#include <asm/smp-ops.h>
31#include <asm/traps.h> 20#include <asm/traps.h>
32 21#include <asm/fw/fw.h>
33#include <asm/gcmpregs.h> 22#include <asm/gcmpregs.h>
34#include <asm/mips-boards/prom.h>
35#include <asm/mips-boards/generic.h> 23#include <asm/mips-boards/generic.h>
36#include <asm/mips-boards/bonito64.h>
37#include <asm/mips-boards/msc01_pci.h>
38
39#include <asm/mips-boards/malta.h> 24#include <asm/mips-boards/malta.h>
40 25
41int prom_argc;
42int *_prom_argv, *_prom_envp;
43
44/*
45 * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
46 * This macro take care of sign extension, if running in 64-bit mode.
47 */
48#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
49
50int init_debug;
51
52static int mips_revision_corid; 26static int mips_revision_corid;
53int mips_revision_sconid; 27int mips_revision_sconid;
54 28
@@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120;
62/* MIPS System controller register base */ 36/* MIPS System controller register base */
63unsigned long _pcictrl_msc; 37unsigned long _pcictrl_msc;
64 38
65char *prom_getenv(char *envname)
66{
67 /*
68 * Return a pointer to the given environment variable.
69 * In 64-bit mode: we're using 64-bit pointers, but all pointers
70 * in the PROM structures are only 32-bit, so we need some
71 * workarounds, if we are running in 64-bit mode.
72 */
73 int i, index=0;
74
75 i = strlen(envname);
76
77 while (prom_envp(index)) {
78 if(strncmp(envname, prom_envp(index), i) == 0) {
79 return(prom_envp(index+1));
80 }
81 index += 2;
82 }
83
84 return NULL;
85}
86
87static inline unsigned char str2hexnum(unsigned char c)
88{
89 if (c >= '0' && c <= '9')
90 return c - '0';
91 if (c >= 'a' && c <= 'f')
92 return c - 'a' + 10;
93 return 0; /* foo */
94}
95
96static inline void str2eaddr(unsigned char *ea, unsigned char *str)
97{
98 int i;
99
100 for (i = 0; i < 6; i++) {
101 unsigned char num;
102
103 if((*str == '.') || (*str == ':'))
104 str++;
105 num = str2hexnum(*str++) << 4;
106 num |= (str2hexnum(*str++));
107 ea[i] = num;
108 }
109}
110
111int get_ethernet_addr(char *ethernet_addr)
112{
113 char *ethaddr_str;
114
115 ethaddr_str = prom_getenv("ethaddr");
116 if (!ethaddr_str) {
117 printk("ethaddr not set in boot prom\n");
118 return -1;
119 }
120 str2eaddr(ethernet_addr, ethaddr_str);
121
122 if (init_debug > 1) {
123 int i;
124 printk("get_ethernet_addr: ");
125 for (i=0; i<5; i++)
126 printk("%02x:", (unsigned char)*(ethernet_addr+i));
127 printk("%02x\n", *(ethernet_addr+i));
128 }
129
130 return 0;
131}
132
133#ifdef CONFIG_SERIAL_8250_CONSOLE 39#ifdef CONFIG_SERIAL_8250_CONSOLE
134static void __init console_config(void) 40static void __init console_config(void)
135{ 41{
@@ -138,17 +44,23 @@ static void __init console_config(void)
138 char parity = '\0', bits = '\0', flow = '\0'; 44 char parity = '\0', bits = '\0', flow = '\0';
139 char *s; 45 char *s;
140 46
141 if ((strstr(prom_getcmdline(), "console=")) == NULL) { 47 if ((strstr(fw_getcmdline(), "console=")) == NULL) {
142 s = prom_getenv("modetty0"); 48 s = fw_getenv("modetty0");
143 if (s) { 49 if (s) {
144 while (*s >= '0' && *s <= '9') 50 while (*s >= '0' && *s <= '9')
145 baud = baud*10 + *s++ - '0'; 51 baud = baud*10 + *s++ - '0';
146 if (*s == ',') s++; 52 if (*s == ',')
147 if (*s) parity = *s++; 53 s++;
148 if (*s == ',') s++; 54 if (*s)
149 if (*s) bits = *s++; 55 parity = *s++;
150 if (*s == ',') s++; 56 if (*s == ',')
151 if (*s == 'h') flow = 'r'; 57 s++;
58 if (*s)
59 bits = *s++;
60 if (*s == ',')
61 s++;
62 if (*s == 'h')
63 flow = 'r';
152 } 64 }
153 if (baud == 0) 65 if (baud == 0)
154 baud = 38400; 66 baud = 38400;
@@ -158,8 +70,9 @@ static void __init console_config(void)
158 bits = '8'; 70 bits = '8';
159 if (flow == '\0') 71 if (flow == '\0')
160 flow = 'r'; 72 flow = 'r';
161 sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow); 73 sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
162 strcat(prom_getcmdline(), console_string); 74 parity, bits, flow);
75 strcat(fw_getcmdline(), console_string);
163 pr_info("Config serial console:%s\n", console_string); 76 pr_info("Config serial console:%s\n", console_string);
164 } 77 }
165} 78}
@@ -193,10 +106,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
193 106
194void __init prom_init(void) 107void __init prom_init(void)
195{ 108{
196 prom_argc = fw_arg0;
197 _prom_argv = (int *) fw_arg1;
198 _prom_envp = (int *) fw_arg2;
199
200 mips_display_message("LINUX"); 109 mips_display_message("LINUX");
201 110
202 /* 111 /*
@@ -306,7 +215,7 @@ void __init prom_init(void)
306 case MIPS_REVISION_SCON_SOCIT: 215 case MIPS_REVISION_SCON_SOCIT:
307 case MIPS_REVISION_SCON_ROCIT: 216 case MIPS_REVISION_SCON_ROCIT:
308 _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000); 217 _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
309 mips_pci_controller: 218mips_pci_controller:
310 mb(); 219 mb();
311 MSC_READ(MSC01_PCI_CFG, data); 220 MSC_READ(MSC01_PCI_CFG, data);
312 MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT); 221 MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
@@ -348,13 +257,13 @@ void __init prom_init(void)
348 default: 257 default:
349 /* Unknown system controller */ 258 /* Unknown system controller */
350 mips_display_message("SC Error"); 259 mips_display_message("SC Error");
351 while (1); /* We die here... */ 260 while (1); /* We die here... */
352 } 261 }
353 board_nmi_handler_setup = mips_nmi_setup; 262 board_nmi_handler_setup = mips_nmi_setup;
354 board_ejtag_handler_setup = mips_ejtag_setup; 263 board_ejtag_handler_setup = mips_ejtag_setup;
355 264
356 prom_init_cmdline(); 265 fw_init_cmdline();
357 prom_meminit(); 266 fw_meminit();
358#ifdef CONFIG_SERIAL_8250_CONSOLE 267#ifdef CONFIG_SERIAL_8250_CONSOLE
359 console_config(); 268 console_config();
360#endif 269#endif
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index e364af70e6cf..0a1339ac3ec8 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -47,7 +47,6 @@
47#include <asm/setup.h> 47#include <asm/setup.h>
48 48
49int gcmp_present = -1; 49int gcmp_present = -1;
50int gic_present;
51static unsigned long _msc01_biu_base; 50static unsigned long _msc01_biu_base;
52static unsigned long _gcmp_base; 51static unsigned long _gcmp_base;
53static unsigned int ipi_map[NR_CPUS]; 52static unsigned int ipi_map[NR_CPUS];
@@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void)
134{ 133{
135 int irq; 134 int irq;
136 135
136 if (gic_compare_int())
137 do_IRQ(MIPS_GIC_IRQ_BASE);
138
137 irq = gic_get_int(); 139 irq = gic_get_int();
138 if (irq < 0) 140 if (irq < 0)
139 return; /* interrupt has already been cleared */ 141 return; /* interrupt has already been cleared */
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index f3d43aa023a9..1f73d63e92a7 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -1,73 +1,45 @@
1/* 1/*
2 * Carsten Langgaard, carstenl@mips.com 2 * This file is subject to the terms and conditions of the GNU General Public
3 * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. 3 * License. See the file "COPYING" in the main directory of this archive
4 * 4 * for more details.
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 * 5 *
18 * PROM library functions for acquiring/using memory descriptors given to 6 * PROM library functions for acquiring/using memory descriptors given to
19 * us from the YAMON. 7 * us from the YAMON.
8 *
9 * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
10 * All rights reserved.
11 * Authors: Carsten Langgaard <carstenl@mips.com>
12 * Steven J. Hill <sjhill@mips.com>
20 */ 13 */
21#include <linux/init.h> 14#include <linux/init.h>
22#include <linux/mm.h>
23#include <linux/bootmem.h> 15#include <linux/bootmem.h>
24#include <linux/pfn.h>
25#include <linux/string.h> 16#include <linux/string.h>
26 17
27#include <asm/bootinfo.h> 18#include <asm/bootinfo.h>
28#include <asm/page.h>
29#include <asm/sections.h> 19#include <asm/sections.h>
20#include <asm/fw/fw.h>
30 21
31#include <asm/mips-boards/prom.h> 22static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
32
33/*#define DEBUG*/
34
35enum yamon_memtypes {
36 yamon_dontuse,
37 yamon_prom,
38 yamon_free,
39};
40static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
41
42#ifdef DEBUG
43static char *mtypes[3] = {
44 "Dont use memory",
45 "YAMON PROM memory",
46 "Free memory",
47};
48#endif
49 23
50/* determined physical memory size, not overridden by command line args */ 24/* determined physical memory size, not overridden by command line args */
51unsigned long physical_memsize = 0L; 25unsigned long physical_memsize = 0L;
52 26
53static struct prom_pmemblock * __init prom_getmdesc(void) 27fw_memblock_t * __init fw_getmdesc(void)
54{ 28{
55 char *memsize_str; 29 char *memsize_str, *ptr;
56 unsigned int memsize; 30 unsigned int memsize;
57 char *ptr;
58 static char cmdline[COMMAND_LINE_SIZE] __initdata; 31 static char cmdline[COMMAND_LINE_SIZE] __initdata;
32 long val;
33 int tmp;
59 34
60 /* otherwise look in the environment */ 35 /* otherwise look in the environment */
61 memsize_str = prom_getenv("memsize"); 36 memsize_str = fw_getenv("memsize");
62 if (!memsize_str) { 37 if (!memsize_str) {
63 printk(KERN_WARNING 38 pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
64 "memsize not set in boot prom, set to default (32Mb)\n");
65 physical_memsize = 0x02000000; 39 physical_memsize = 0x02000000;
66 } else { 40 } else {
67#ifdef DEBUG 41 tmp = kstrtol(memsize_str, 0, &val);
68 pr_debug("prom_memsize = %s\n", memsize_str); 42 physical_memsize = (unsigned long)val;
69#endif
70 physical_memsize = simple_strtol(memsize_str, NULL, 0);
71 } 43 }
72 44
73#ifdef CONFIG_CPU_BIG_ENDIAN 45#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
90 62
91 memset(mdesc, 0, sizeof(mdesc)); 63 memset(mdesc, 0, sizeof(mdesc));
92 64
93 mdesc[0].type = yamon_dontuse; 65 mdesc[0].type = fw_dontuse;
94 mdesc[0].base = 0x00000000; 66 mdesc[0].base = 0x00000000;
95 mdesc[0].size = 0x00001000; 67 mdesc[0].size = 0x00001000;
96 68
97 mdesc[1].type = yamon_prom; 69 mdesc[1].type = fw_code;
98 mdesc[1].base = 0x00001000; 70 mdesc[1].base = 0x00001000;
99 mdesc[1].size = 0x000ef000; 71 mdesc[1].size = 0x000ef000;
100 72
@@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
105 * This mean that this area can't be used as DMA memory for PCI 77 * This mean that this area can't be used as DMA memory for PCI
106 * devices. 78 * devices.
107 */ 79 */
108 mdesc[2].type = yamon_dontuse; 80 mdesc[2].type = fw_dontuse;
109 mdesc[2].base = 0x000f0000; 81 mdesc[2].base = 0x000f0000;
110 mdesc[2].size = 0x00010000; 82 mdesc[2].size = 0x00010000;
111 83
112 mdesc[3].type = yamon_dontuse; 84 mdesc[3].type = fw_dontuse;
113 mdesc[3].base = 0x00100000; 85 mdesc[3].base = 0x00100000;
114 mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base; 86 mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
87 mdesc[3].base;
115 88
116 mdesc[4].type = yamon_free; 89 mdesc[4].type = fw_free;
117 mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end)); 90 mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
118 mdesc[4].size = memsize - mdesc[4].base; 91 mdesc[4].size = memsize - mdesc[4].base;
119 92
120 return &mdesc[0]; 93 return &mdesc[0];
121} 94}
122 95
123static int __init prom_memtype_classify(unsigned int type) 96static int __init fw_memtype_classify(unsigned int type)
124{ 97{
125 switch (type) { 98 switch (type) {
126 case yamon_free: 99 case fw_free:
127 return BOOT_MEM_RAM; 100 return BOOT_MEM_RAM;
128 case yamon_prom: 101 case fw_code:
129 return BOOT_MEM_ROM_DATA; 102 return BOOT_MEM_ROM_DATA;
130 default: 103 default:
131 return BOOT_MEM_RESERVED; 104 return BOOT_MEM_RESERVED;
132 } 105 }
133} 106}
134 107
135void __init prom_meminit(void) 108void __init fw_meminit(void)
136{ 109{
137 struct prom_pmemblock *p; 110 fw_memblock_t *p;
138 111
139#ifdef DEBUG 112 p = fw_getmdesc();
140 pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
141 p = prom_getmdesc();
142 while (p->size) {
143 int i = 0;
144 pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
145 i, p, p->base, p->size, mtypes[p->type]);
146 p++;
147 i++;
148 }
149#endif
150 p = prom_getmdesc();
151 113
152 while (p->size) { 114 while (p->size) {
153 long type; 115 long type;
154 unsigned long base, size; 116 unsigned long base, size;
155 117
156 type = prom_memtype_classify(p->type); 118 type = fw_memtype_classify(p->type);
157 base = p->base; 119 base = p->base;
158 size = p->size; 120 size = p->size;
159 121
@@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void)
172 continue; 134 continue;
173 135
174 addr = boot_mem_map.map[i].addr; 136 addr = boot_mem_map.map[i].addr;
175 free_init_pages("prom memory", 137 free_init_pages("YAMON memory",
176 addr, addr + boot_mem_map.map[i].size); 138 addr, addr + boot_mem_map.map[i].size);
177 } 139 }
178} 140}
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 200f64df2c9b..c72a06936781 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -25,13 +25,13 @@
25#include <linux/screen_info.h> 25#include <linux/screen_info.h>
26#include <linux/time.h> 26#include <linux/time.h>
27 27
28#include <asm/bootinfo.h> 28#include <asm/fw/fw.h>
29#include <asm/mips-boards/generic.h> 29#include <asm/mips-boards/generic.h>
30#include <asm/mips-boards/prom.h>
31#include <asm/mips-boards/malta.h> 30#include <asm/mips-boards/malta.h>
32#include <asm/mips-boards/maltaint.h> 31#include <asm/mips-boards/maltaint.h>
33#include <asm/dma.h> 32#include <asm/dma.h>
34#include <asm/traps.h> 33#include <asm/traps.h>
34#include <asm/gcmpregs.h>
35#ifdef CONFIG_VT 35#ifdef CONFIG_VT
36#include <linux/console.h> 36#include <linux/console.h>
37#endif 37#endif
@@ -105,6 +105,66 @@ static void __init fd_activate(void)
105} 105}
106#endif 106#endif
107 107
108static int __init plat_enable_iocoherency(void)
109{
110 int supported = 0;
111 if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
112 if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
113 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
114 pr_info("Enabled Bonito CPU coherency\n");
115 supported = 1;
116 }
117 if (strstr(fw_getcmdline(), "iobcuncached")) {
118 BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
119 BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
120 ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
121 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
122 pr_info("Disabled Bonito IOBC coherency\n");
123 } else {
124 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
125 BONITO_PCIMEMBASECFG |=
126 (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
127 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
128 pr_info("Enabled Bonito IOBC coherency\n");
129 }
130 } else if (gcmp_niocu() != 0) {
131 /* Nothing special needs to be done to enable coherency */
132 pr_info("CMP IOCU detected\n");
133 if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
134 pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
135 return 0;
136 }
137 supported = 1;
138 }
139 hw_coherentio = supported;
140 return supported;
141}
142
143static void __init plat_setup_iocoherency(void)
144{
145#ifdef CONFIG_DMA_NONCOHERENT
146 /*
147 * Kernel has been configured with software coherency
148 * but we might choose to turn it off and use hardware
149 * coherency instead.
150 */
151 if (plat_enable_iocoherency()) {
152 if (coherentio == 0)
153 pr_info("Hardware DMA cache coherency disabled\n");
154 else
155 pr_info("Hardware DMA cache coherency enabled\n");
156 } else {
157 if (coherentio == 1)
158 pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
159 else
160 pr_info("Software DMA cache coherency enabled\n");
161 }
162#else
163 if (!plat_enable_iocoherency())
164 panic("Hardware DMA cache coherency not supported!");
165#endif
166}
167
108#ifdef CONFIG_BLK_DEV_IDE 168#ifdef CONFIG_BLK_DEV_IDE
109static void __init pci_clock_check(void) 169static void __init pci_clock_check(void)
110{ 170{
@@ -115,16 +175,15 @@ static void __init pci_clock_check(void)
115 33, 20, 25, 30, 12, 16, 37, 10 175 33, 20, 25, 30, 12, 16, 37, 10
116 }; 176 };
117 int pciclock = pciclocks[jmpr]; 177 int pciclock = pciclocks[jmpr];
118 char *argptr = prom_getcmdline(); 178 char *argptr = fw_getcmdline();
119 179
120 if (pciclock != 33 && !strstr(argptr, "idebus=")) { 180 if (pciclock != 33 && !strstr(argptr, "idebus=")) {
121 printk(KERN_WARNING "WARNING: PCI clock is %dMHz, " 181 pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
122 "setting idebus\n", pciclock); 182 pciclock);
123 argptr += strlen(argptr); 183 argptr += strlen(argptr);
124 sprintf(argptr, " idebus=%d", pciclock); 184 sprintf(argptr, " idebus=%d", pciclock);
125 if (pciclock < 20 || pciclock > 66) 185 if (pciclock < 20 || pciclock > 66)
126 printk(KERN_WARNING "WARNING: IDE timing " 186 pr_warn("WARNING: IDE timing calculations will be incorrect\n");
127 "calculations will be incorrect\n");
128 } 187 }
129} 188}
130#endif 189#endif
@@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void)
153{ 212{
154 char *argptr; 213 char *argptr;
155 214
156 argptr = prom_getcmdline(); 215 argptr = fw_getcmdline();
157 if (strstr(argptr, "debug")) { 216 if (strstr(argptr, "debug")) {
158 BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE; 217 BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
159 printk(KERN_INFO "Enabled Bonito debug mode\n"); 218 pr_info("Enabled Bonito debug mode\n");
160 } else 219 } else
161 BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE; 220 BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
162 221
163#ifdef CONFIG_DMA_COHERENT 222#ifdef CONFIG_DMA_COHERENT
164 if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { 223 if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
165 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; 224 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
166 printk(KERN_INFO "Enabled Bonito CPU coherency\n"); 225 pr_info("Enabled Bonito CPU coherency\n");
167 226
168 argptr = prom_getcmdline(); 227 argptr = fw_getcmdline();
169 if (strstr(argptr, "iobcuncached")) { 228 if (strstr(argptr, "iobcuncached")) {
170 BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; 229 BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
171 BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG & 230 BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
172 ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | 231 ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
173 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); 232 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
174 printk(KERN_INFO "Disabled Bonito IOBC coherency\n"); 233 pr_info("Disabled Bonito IOBC coherency\n");
175 } else { 234 } else {
176 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN; 235 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
177 BONITO_PCIMEMBASECFG |= 236 BONITO_PCIMEMBASECFG |=
178 (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | 237 (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
179 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); 238 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
180 printk(KERN_INFO "Enabled Bonito IOBC coherency\n"); 239 pr_info("Enabled Bonito IOBC coherency\n");
181 } 240 }
182 } else 241 } else
183 panic("Hardware DMA cache coherency not supported"); 242 panic("Hardware DMA cache coherency not supported");
@@ -207,6 +266,8 @@ void __init plat_mem_setup(void)
207 if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) 266 if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
208 bonito_quirks_setup(); 267 bonito_quirks_setup();
209 268
269 plat_setup_iocoherency();
270
210#ifdef CONFIG_BLK_DEV_IDE 271#ifdef CONFIG_BLK_DEV_IDE
211 pci_clock_check(); 272 pci_clock_check();
212#endif 273#endif
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index a144b89cf9ba..0ad305f75802 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -39,12 +39,9 @@
39#include <asm/gic.h> 39#include <asm/gic.h>
40 40
41#include <asm/mips-boards/generic.h> 41#include <asm/mips-boards/generic.h>
42#include <asm/mips-boards/prom.h>
43
44#include <asm/mips-boards/maltaint.h> 42#include <asm/mips-boards/maltaint.h>
45 43
46unsigned long cpu_khz; 44unsigned long cpu_khz;
47int gic_frequency;
48 45
49static int mips_cpu_timer_irq; 46static int mips_cpu_timer_irq;
50static int mips_cpu_perf_irq; 47static int mips_cpu_perf_irq;
@@ -74,7 +71,24 @@ static void __init estimate_frequencies(void)
74{ 71{
75 unsigned long flags; 72 unsigned long flags;
76 unsigned int count, start; 73 unsigned int count, start;
74#ifdef CONFIG_IRQ_GIC
77 unsigned int giccount = 0, gicstart = 0; 75 unsigned int giccount = 0, gicstart = 0;
76#endif
77
78#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
79 unsigned int prid = read_c0_prid() & 0xffff00;
80
81 /*
82 * XXXKYMA: hardwire the CPU frequency to Host Freq/4
83 */
84 count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
85 if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
86 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
87 count *= 2;
88
89 mips_hpt_frequency = count;
90 return;
91#endif
78 92
79 local_irq_save(flags); 93 local_irq_save(flags);
80 94
@@ -84,26 +98,32 @@ static void __init estimate_frequencies(void)
84 98
85 /* Initialize counters. */ 99 /* Initialize counters. */
86 start = read_c0_count(); 100 start = read_c0_count();
101#ifdef CONFIG_IRQ_GIC
87 if (gic_present) 102 if (gic_present)
88 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart); 103 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
104#endif
89 105
90 /* Read counter exactly on falling edge of update flag. */ 106 /* Read counter exactly on falling edge of update flag. */
91 while (CMOS_READ(RTC_REG_A) & RTC_UIP); 107 while (CMOS_READ(RTC_REG_A) & RTC_UIP);
92 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); 108 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
93 109
94 count = read_c0_count(); 110 count = read_c0_count();
111#ifdef CONFIG_IRQ_GIC
95 if (gic_present) 112 if (gic_present)
96 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount); 113 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
114#endif
97 115
98 local_irq_restore(flags); 116 local_irq_restore(flags);
99 117
100 count -= start; 118 count -= start;
101 if (gic_present)
102 giccount -= gicstart;
103
104 mips_hpt_frequency = count; 119 mips_hpt_frequency = count;
105 if (gic_present) 120
121#ifdef CONFIG_IRQ_GIC
122 if (gic_present) {
123 giccount -= gicstart;
106 gic_frequency = giccount; 124 gic_frequency = giccount;
125 }
126#endif
107} 127}
108 128
109void read_persistent_clock(struct timespec *ts) 129void read_persistent_clock(struct timespec *ts)
@@ -159,24 +179,27 @@ void __init plat_time_init(void)
159 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) 179 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
160 freq *= 2; 180 freq *= 2;
161 freq = freqround(freq, 5000); 181 freq = freqround(freq, 5000);
162 pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000, 182 printk("CPU frequency %d.%02d MHz\n", freq/1000000,
163 (freq%1000000)*100/1000000); 183 (freq%1000000)*100/1000000);
164 cpu_khz = freq / 1000; 184 cpu_khz = freq / 1000;
165 185
166 if (gic_present) { 186 mips_scroll_message();
167 freq = freqround(gic_frequency, 5000);
168 pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
169 (freq%1000000)*100/1000000);
170 gic_clocksource_init(gic_frequency);
171 } else
172 init_r4k_clocksource();
173 187
174#ifdef CONFIG_I8253 188#ifdef CONFIG_I8253
175 /* Only Malta has a PIT. */ 189 /* Only Malta has a PIT. */
176 setup_pit_timer(); 190 setup_pit_timer();
177#endif 191#endif
178 192
179 mips_scroll_message(); 193#ifdef CONFIG_IRQ_GIC
194 if (gic_present) {
195 freq = freqround(gic_frequency, 5000);
196 printk("GIC frequency %d.%02d MHz\n", freq/1000000,
197 (freq%1000000)*100/1000000);
198#ifdef CONFIG_CSRC_GIC
199 gic_clocksource_init(gic_frequency);
200#endif
201 }
202#endif
180 203
181 plat_perf_setup(); 204 plat_perf_setup();
182} 205}
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 10ec701ce6c7..be114209217c 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -8,10 +8,10 @@
8# Copyright (C) 2012 MIPS Technoligies, Inc. All rights reserved. 8# Copyright (C) 2012 MIPS Technoligies, Inc. All rights reserved.
9# Steven J. Hill <sjhill@mips.com> 9# Steven J. Hill <sjhill@mips.com>
10# 10#
11obj-y := sead3-lcd.o sead3-cmdline.o \ 11obj-y := sead3-lcd.o sead3-display.o sead3-init.o \
12 sead3-display.o sead3-init.o sead3-int.o \ 12 sead3-int.o sead3-mtd.o sead3-net.o \
13 sead3-mtd.o sead3-net.o sead3-platform.o \ 13 sead3-platform.o sead3-reset.o \
14 sead3-reset.o sead3-setup.o sead3-time.o 14 sead3-setup.o sead3-time.o
15 15
16obj-y += sead3-i2c-dev.o sead3-i2c.o \ 16obj-y += sead3-i2c-dev.o sead3-i2c.o \
17 sead3-pic32-i2c-drv.o sead3-pic32-bus.o \ 17 sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index 322148c353ed..0a168c948b01 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
34static struct led_classdev sead3_pled = { 34static struct led_classdev sead3_pled = {
35 .name = "sead3::pled", 35 .name = "sead3::pled",
36 .brightness_set = sead3_pled_set, 36 .brightness_set = sead3_pled_set,
37 .flags = LED_CORE_SUSPENDRESUME,
37}; 38};
38 39
39static struct led_classdev sead3_fled = { 40static struct led_classdev sead3_fled = {
40 .name = "sead3::fled", 41 .name = "sead3::fled",
41 .brightness_set = sead3_fled_set, 42 .brightness_set = sead3_fled_set,
43 .flags = LED_CORE_SUSPENDRESUME,
42}; 44};
43 45
44#ifdef CONFIG_PM
45static int sead3_led_suspend(struct platform_device *dev,
46 pm_message_t state)
47{
48 led_classdev_suspend(&sead3_pled);
49 led_classdev_suspend(&sead3_fled);
50 return 0;
51}
52
53static int sead3_led_resume(struct platform_device *dev)
54{
55 led_classdev_resume(&sead3_pled);
56 led_classdev_resume(&sead3_fled);
57 return 0;
58}
59#else
60#define sead3_led_suspend NULL
61#define sead3_led_resume NULL
62#endif
63
64static int sead3_led_probe(struct platform_device *pdev) 46static int sead3_led_probe(struct platform_device *pdev)
65{ 47{
66 int ret; 48 int ret;
@@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev)
86static struct platform_driver sead3_led_driver = { 68static struct platform_driver sead3_led_driver = {
87 .probe = sead3_led_probe, 69 .probe = sead3_led_probe,
88 .remove = sead3_led_remove, 70 .remove = sead3_led_remove,
89 .suspend = sead3_led_suspend,
90 .resume = sead3_led_resume,
91 .driver = { 71 .driver = {
92 .name = DRVNAME, 72 .name = DRVNAME,
93 .owner = THIS_MODULE, 73 .owner = THIS_MODULE,
diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c
deleted file mode 100644
index a2e6cec67f57..000000000000
--- a/arch/mips/mti-sead3/sead3-cmdline.c
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/init.h>
9#include <linux/string.h>
10
11#include <asm/bootinfo.h>
12
13extern int prom_argc;
14extern int *_prom_argv;
15
16/*
17 * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
18 * This macro take care of sign extension.
19 */
20#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
21
22char * __init prom_getcmdline(void)
23{
24 return &(arcs_cmdline[0]);
25}
26
27void __init prom_init_cmdline(void)
28{
29 char *cp;
30 int actr;
31
32 actr = 1; /* Always ignore argv[0] */
33
34 cp = &(arcs_cmdline[0]);
35 while (actr < prom_argc) {
36 strcpy(cp, prom_argv(actr));
37 cp += strlen(prom_argv(actr));
38 *cp++ = ' ';
39 actr++;
40 }
41 if (cp != &(arcs_cmdline[0])) {
42 /* get rid of trailing space */
43 --cp;
44 *cp = '\0';
45 }
46}
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
index 2ddef19a9adc..031f47d69770 100644
--- a/arch/mips/mti-sead3/sead3-console.c
+++ b/arch/mips/mti-sead3/sead3-console.c
@@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr)
26 __raw_writel(value, PORT(base_addr, offset)); 26 __raw_writel(value, PORT(base_addr, offset));
27} 27}
28 28
29void __init prom_init_early_console(char port) 29void __init fw_init_early_console(char port)
30{ 30{
31 console_port = port; 31 console_port = port;
32} 32}
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
index e389326cfa42..94875991907b 100644
--- a/arch/mips/mti-sead3/sead3-display.c
+++ b/arch/mips/mti-sead3/sead3-display.c
@@ -8,7 +8,6 @@
8#include <linux/timer.h> 8#include <linux/timer.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <asm/mips-boards/generic.h> 10#include <asm/mips-boards/generic.h>
11#include <asm/mips-boards/prom.h>
12 11
13static unsigned int display_count; 12static unsigned int display_count;
14static unsigned int max_display_count; 13static unsigned int max_display_count;
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
index f95abaa1aa5d..bfbd17b120a2 100644
--- a/arch/mips/mti-sead3/sead3-init.c
+++ b/arch/mips/mti-sead3/sead3-init.c
@@ -12,38 +12,51 @@
12#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
13#include <asm/traps.h> 13#include <asm/traps.h>
14#include <asm/mips-boards/generic.h> 14#include <asm/mips-boards/generic.h>
15#include <asm/mips-boards/prom.h> 15#include <asm/fw/fw.h>
16
17extern void prom_init_early_console(char port);
18 16
19extern char except_vec_nmi; 17extern char except_vec_nmi;
20extern char except_vec_ejtag_debug; 18extern char except_vec_ejtag_debug;
21 19
22int prom_argc; 20#ifdef CONFIG_SERIAL_8250_CONSOLE
23int *_prom_argv, *_prom_envp; 21static void __init console_config(void)
24
25#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
26
27char *prom_getenv(char *envname)
28{ 22{
29 /* 23 char console_string[40];
30 * Return a pointer to the given environment variable. 24 int baud = 0;
31 * In 64-bit mode: we're using 64-bit pointers, but all pointers 25 char parity = '\0', bits = '\0', flow = '\0';
32 * in the PROM structures are only 32-bit, so we need some 26 char *s;
33 * workarounds, if we are running in 64-bit mode. 27
34 */ 28 if ((strstr(fw_getcmdline(), "console=")) == NULL) {
35 int i, index = 0; 29 s = fw_getenv("modetty0");
36 30 if (s) {
37 i = strlen(envname); 31 while (*s >= '0' && *s <= '9')
38 32 baud = baud*10 + *s++ - '0';
39 while (prom_envp(index)) { 33 if (*s == ',')
40 if (strncmp(envname, prom_envp(index), i) == 0) 34 s++;
41 return prom_envp(index+1); 35 if (*s)
42 index += 2; 36 parity = *s++;
37 if (*s == ',')
38 s++;
39 if (*s)
40 bits = *s++;
41 if (*s == ',')
42 s++;
43 if (*s == 'h')
44 flow = 'r';
45 }
46 if (baud == 0)
47 baud = 38400;
48 if (parity != 'n' && parity != 'o' && parity != 'e')
49 parity = 'n';
50 if (bits != '7' && bits != '8')
51 bits = '8';
52 if (flow == '\0')
53 flow = 'r';
54 sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
55 parity, bits, flow);
56 strcat(fw_getcmdline(), console_string);
43 } 57 }
44
45 return NULL;
46} 58}
59#endif
47 60
48static void __init mips_nmi_setup(void) 61static void __init mips_nmi_setup(void)
49{ 62{
@@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void)
52 base = cpu_has_veic ? 65 base = cpu_has_veic ?
53 (void *)(CAC_BASE + 0xa80) : 66 (void *)(CAC_BASE + 0xa80) :
54 (void *)(CAC_BASE + 0x380); 67 (void *)(CAC_BASE + 0x380);
68#ifdef CONFIG_CPU_MICROMIPS
69 /*
70 * Decrement the exception vector address by one for microMIPS.
71 */
72 memcpy(base, (&except_vec_nmi - 1), 0x80);
73
74 /*
75 * This is a hack. We do not know if the boot loader was built with
76 * microMIPS instructions or not. If it was not, the NMI exception
77 * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
78 * assembly below forces us into microMIPS mode if we are a pure
79 * microMIPS kernel. The assembly instructions are:
80 *
81 * 3C1A8000 lui k0,0x8000
82 * 375A0381 ori k0,k0,0x381
83 * 03400008 jr k0
84 * 00000000 nop
85 *
86 * The mode switch occurs by jumping to the unaligned exception
87 * vector address at 0x80000381 which would have been 0x80000380
88 * in MIPS32 mode. The jump to the unaligned address transitions
89 * us into microMIPS mode.
90 */
91 if (!cpu_has_veic) {
92 void *base2 = (void *)(CAC_BASE + 0xa80);
93 *((unsigned int *)base2) = 0x3c1a8000;
94 *((unsigned int *)base2 + 1) = 0x375a0381;
95 *((unsigned int *)base2 + 2) = 0x03400008;
96 *((unsigned int *)base2 + 3) = 0x00000000;
97 flush_icache_range((unsigned long)base2,
98 (unsigned long)base2 + 0x10);
99 }
100#else
55 memcpy(base, &except_vec_nmi, 0x80); 101 memcpy(base, &except_vec_nmi, 0x80);
102#endif
56 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 103 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
57} 104}
58 105
@@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void)
63 base = cpu_has_veic ? 110 base = cpu_has_veic ?
64 (void *)(CAC_BASE + 0xa00) : 111 (void *)(CAC_BASE + 0xa00) :
65 (void *)(CAC_BASE + 0x300); 112 (void *)(CAC_BASE + 0x300);
113#ifdef CONFIG_CPU_MICROMIPS
114 /* Deja vu... */
115 memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
116 if (!cpu_has_veic) {
117 void *base2 = (void *)(CAC_BASE + 0xa00);
118 *((unsigned int *)base2) = 0x3c1a8000;
119 *((unsigned int *)base2 + 1) = 0x375a0301;
120 *((unsigned int *)base2 + 2) = 0x03400008;
121 *((unsigned int *)base2 + 3) = 0x00000000;
122 flush_icache_range((unsigned long)base2,
123 (unsigned long)base2 + 0x10);
124 }
125#else
66 memcpy(base, &except_vec_ejtag_debug, 0x80); 126 memcpy(base, &except_vec_ejtag_debug, 0x80);
127#endif
67 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 128 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
68} 129}
69 130
70void __init prom_init(void) 131void __init prom_init(void)
71{ 132{
72 prom_argc = fw_arg0;
73 _prom_argv = (int *) fw_arg1;
74 _prom_envp = (int *) fw_arg2;
75
76 board_nmi_handler_setup = mips_nmi_setup; 133 board_nmi_handler_setup = mips_nmi_setup;
77 board_ejtag_handler_setup = mips_ejtag_setup; 134 board_ejtag_handler_setup = mips_ejtag_setup;
78 135
79 prom_init_cmdline(); 136 fw_init_cmdline();
80#ifdef CONFIG_EARLY_PRINTK 137#ifdef CONFIG_EARLY_PRINTK
81 if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL) 138 if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
82 prom_init_early_console(0); 139 fw_init_early_console(0);
83 else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL) 140 else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
84 prom_init_early_console(1); 141 fw_init_early_console(1);
85#endif 142#endif
86#ifdef CONFIG_SERIAL_8250_CONSOLE 143#ifdef CONFIG_SERIAL_8250_CONSOLE
87 if ((strstr(prom_getcmdline(), "console=")) == NULL) 144 if ((strstr(fw_getcmdline(), "console=")) == NULL)
88 strcat(prom_getcmdline(), " console=ttyS0,38400n8r"); 145 strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
146 console_config();
89#endif 147#endif
90} 148}
91 149
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
index e26e08274fc5..6a560ac03def 100644
--- a/arch/mips/mti-sead3/sead3-int.c
+++ b/arch/mips/mti-sead3/sead3-int.c
@@ -20,7 +20,6 @@
20#define SEAD_CONFIG_BASE 0x1b100110 20#define SEAD_CONFIG_BASE 0x1b100110
21#define SEAD_CONFIG_SIZE 4 21#define SEAD_CONFIG_SIZE 4
22 22
23int gic_present;
24static unsigned long sead3_config_reg; 23static unsigned long sead3_config_reg;
25 24
26/* 25/*
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index f012fd164cee..b5059dc899f4 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -11,10 +11,6 @@
11#include <linux/bootmem.h> 11#include <linux/bootmem.h>
12 12
13#include <asm/mips-boards/generic.h> 13#include <asm/mips-boards/generic.h>
14#include <asm/prom.h>
15
16int coherentio; /* 0 => no DMA cache coherency (may be set by user) */
17int hw_coherentio; /* 0 => no HW DMA cache coherency (reflects real HW) */
18 14
19const char *get_system_type(void) 15const char *get_system_type(void)
20{ 16{
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 239e4e32757f..96b42eb9b5e2 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -11,7 +11,6 @@
11#include <asm/time.h> 11#include <asm/time.h>
12#include <asm/irq.h> 12#include <asm/irq.h>
13#include <asm/mips-boards/generic.h> 13#include <asm/mips-boards/generic.h>
14#include <asm/mips-boards/prom.h>
15 14
16unsigned long cpu_khz; 15unsigned long cpu_khz;
17 16
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 3c05bf9e280a..e0873a31ebaa 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD
2 2
3if NLM_XLP_BOARD 3if NLM_XLP_BOARD
4config DT_XLP_EVP 4config DT_XLP_EVP
5 bool "Built-in device tree for XLP EVP/SVP boards" 5 bool "Built-in device tree for XLP EVP boards"
6 default y 6 default y
7 help 7 help
8 Add an FDT blob for XLP EVP and SVP boards into the kernel. 8 Add an FDT blob for XLP EVP boards into the kernel.
9 This DTB will be used if the firmware does not pass in a DTB 9 This DTB will be used if the firmware does not pass in a DTB
10 pointer to the kernel. The corresponding DTS file is at 10 pointer to the kernel. The corresponding DTS file is at
11 arch/mips/netlogic/dts/xlp_evp.dts 11 arch/mips/netlogic/dts/xlp_evp.dts
12
13config DT_XLP_SVP
14 bool "Built-in device tree for XLP SVP boards"
15 default y
16 help
17 Add an FDT blob for XLP VP boards into the kernel.
18 This DTB will be used if the firmware does not pass in a DTB
19 pointer to the kernel. The corresponding DTS file is at
20 arch/mips/netlogic/dts/xlp_svp.dts
12 21
13config NLM_MULTINODE 22config NLM_MULTINODE
14 bool "Support for multi-chip boards" 23 bool "Support for multi-chip boards"
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 2bb95dcfe20a..ffba52489bef 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -148,8 +148,7 @@ void nlm_cpus_done(void)
148int nlm_cpu_ready[NR_CPUS]; 148int nlm_cpu_ready[NR_CPUS];
149unsigned long nlm_next_gp; 149unsigned long nlm_next_gp;
150unsigned long nlm_next_sp; 150unsigned long nlm_next_sp;
151 151static cpumask_t phys_cpu_present_mask;
152cpumask_t phys_cpu_present_map;
153 152
154void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) 153void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
155{ 154{
@@ -169,11 +168,12 @@ void __init nlm_smp_setup(void)
169{ 168{
170 unsigned int boot_cpu; 169 unsigned int boot_cpu;
171 int num_cpus, i, ncore; 170 int num_cpus, i, ncore;
171 char buf[64];
172 172
173 boot_cpu = hard_smp_processor_id(); 173 boot_cpu = hard_smp_processor_id();
174 cpumask_clear(&phys_cpu_present_map); 174 cpumask_clear(&phys_cpu_present_mask);
175 175
176 cpumask_set_cpu(boot_cpu, &phys_cpu_present_map); 176 cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
177 __cpu_number_map[boot_cpu] = 0; 177 __cpu_number_map[boot_cpu] = 0;
178 __cpu_logical_map[0] = boot_cpu; 178 __cpu_logical_map[0] = boot_cpu;
179 set_cpu_possible(0, true); 179 set_cpu_possible(0, true);
@@ -185,7 +185,7 @@ void __init nlm_smp_setup(void)
185 * it is only set for ASPs (see smpboot.S) 185 * it is only set for ASPs (see smpboot.S)
186 */ 186 */
187 if (nlm_cpu_ready[i]) { 187 if (nlm_cpu_ready[i]) {
188 cpumask_set_cpu(i, &phys_cpu_present_map); 188 cpumask_set_cpu(i, &phys_cpu_present_mask);
189 __cpu_number_map[i] = num_cpus; 189 __cpu_number_map[i] = num_cpus;
190 __cpu_logical_map[num_cpus] = i; 190 __cpu_logical_map[num_cpus] = i;
191 set_cpu_possible(num_cpus, true); 191 set_cpu_possible(num_cpus, true);
@@ -193,16 +193,19 @@ void __init nlm_smp_setup(void)
193 } 193 }
194 } 194 }
195 195
196 cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
197 pr_info("Physical CPU mask: %s\n", buf);
198 cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
199 pr_info("Possible CPU mask: %s\n", buf);
200
196 /* check with the cores we have worken up */ 201 /* check with the cores we have worken up */
197 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) 202 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
198 ncore += hweight32(nlm_get_node(i)->coremask); 203 ncore += hweight32(nlm_get_node(i)->coremask);
199 204
200 pr_info("Phys CPU present map: %lx, possible map %lx\n",
201 (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
202 (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
203
204 pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore, 205 pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
205 nlm_threads_per_core, num_cpus); 206 nlm_threads_per_core, num_cpus);
207
208 /* switch NMI handler to boot CPUs */
206 nlm_set_nmi_handler(nlm_boot_secondary_cpus); 209 nlm_set_nmi_handler(nlm_boot_secondary_cpus);
207} 210}
208 211
diff --git a/arch/mips/netlogic/dts/Makefile b/arch/mips/netlogic/dts/Makefile
index d117d46413aa..aecb6fa9a9c3 100644
--- a/arch/mips/netlogic/dts/Makefile
+++ b/arch/mips/netlogic/dts/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o 1obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
2obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index 7628b5464fc7..e14f42308064 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -20,7 +20,7 @@
20 #address-cells = <2>; 20 #address-cells = <2>;
21 #size-cells = <1>; 21 #size-cells = <1>;
22 compatible = "simple-bus"; 22 compatible = "simple-bus";
23 ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG 23 ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
24 1 0 0 0x16000000 0x01000000>; // GBU chipselects 24 1 0 0 0x16000000 0x01000000>; // GBU chipselects
25 25
26 serial0: serial@30000 { 26 serial0: serial@30000 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
new file mode 100644
index 000000000000..8af4bdbe5d99
--- /dev/null
+++ b/arch/mips/netlogic/dts/xlp_svp.dts
@@ -0,0 +1,124 @@
1/*
2 * XLP3XX Device Tree Source for SVP boards
3 */
4
5/dts-v1/;
6/ {
7 model = "netlogic,XLP-SVP";
8 compatible = "netlogic,xlp";
9 #address-cells = <2>;
10 #size-cells = <2>;
11
12 memory {
13 device_type = "memory";
14 reg = <0 0x00100000 0 0x0FF00000 // 255M at 1M
15 0 0x20000000 0 0xa0000000 // 2560M at 512M
16 0 0xe0000000 0 0x40000000>;
17 };
18
19 soc {
20 #address-cells = <2>;
21 #size-cells = <1>;
22 compatible = "simple-bus";
23 ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
24 1 0 0 0x16000000 0x01000000>; // GBU chipselects
25
26 serial0: serial@30000 {
27 device_type = "serial";
28 compatible = "ns16550";
29 reg = <0 0x30100 0xa00>;
30 reg-shift = <2>;
31 reg-io-width = <4>;
32 clock-frequency = <133333333>;
33 interrupt-parent = <&pic>;
34 interrupts = <17>;
35 };
36 serial1: serial@31000 {
37 device_type = "serial";
38 compatible = "ns16550";
39 reg = <0 0x31100 0xa00>;
40 reg-shift = <2>;
41 reg-io-width = <4>;
42 clock-frequency = <133333333>;
43 interrupt-parent = <&pic>;
44 interrupts = <18>;
45 };
46 i2c0: ocores@32000 {
47 compatible = "opencores,i2c-ocores";
48 #address-cells = <1>;
49 #size-cells = <0>;
50 reg = <0 0x32100 0xa00>;
51 reg-shift = <2>;
52 reg-io-width = <4>;
53 clock-frequency = <32000000>;
54 interrupt-parent = <&pic>;
55 interrupts = <30>;
56 };
57 i2c1: ocores@33000 {
58 compatible = "opencores,i2c-ocores";
59 #address-cells = <1>;
60 #size-cells = <0>;
61 reg = <0 0x33100 0xa00>;
62 reg-shift = <2>;
63 reg-io-width = <4>;
64 clock-frequency = <32000000>;
65 interrupt-parent = <&pic>;
66 interrupts = <31>;
67
68 rtc@68 {
69 compatible = "dallas,ds1374";
70 reg = <0x68>;
71 };
72
73 dtt@4c {
74 compatible = "national,lm90";
75 reg = <0x4c>;
76 };
77 };
78 pic: pic@4000 {
79 interrupt-controller;
80 #address-cells = <0>;
81 #interrupt-cells = <1>;
82 reg = <0 0x4000 0x200>;
83 };
84
85 nor_flash@1,0 {
86 compatible = "cfi-flash";
87 #address-cells = <1>;
88 #size-cells = <1>;
89 bank-width = <2>;
90 reg = <1 0 0x1000000>;
91
92 partition@0 {
93 label = "x-loader";
94 reg = <0x0 0x100000>; /* 1M */
95 read-only;
96 };
97
98 partition@100000 {
99 label = "u-boot";
100 reg = <0x100000 0x100000>; /* 1M */
101 };
102
103 partition@200000 {
104 label = "kernel";
105 reg = <0x200000 0x500000>; /* 5M */
106 };
107
108 partition@700000 {
109 label = "rootfs";
110 reg = <0x700000 0x800000>; /* 8M */
111 };
112
113 partition@f00000 {
114 label = "env";
115 reg = <0xf00000 0x100000>; /* 1M */
116 read-only;
117 };
118 };
119 };
120
121 chosen {
122 bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
123 };
124};
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index c68fd4026104..87560e4db35f 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -61,43 +61,61 @@ void nlm_node_init(int node)
61 61
62int nlm_irq_to_irt(int irq) 62int nlm_irq_to_irt(int irq)
63{ 63{
64 if (!PIC_IRQ_IS_IRT(irq)) 64 uint64_t pcibase;
65 return -1; 65 int devoff, irt;
66 66
67 switch (irq) { 67 switch (irq) {
68 case PIC_UART_0_IRQ: 68 case PIC_UART_0_IRQ:
69 return PIC_IRT_UART_0_INDEX; 69 devoff = XLP_IO_UART0_OFFSET(0);
70 break;
70 case PIC_UART_1_IRQ: 71 case PIC_UART_1_IRQ:
71 return PIC_IRT_UART_1_INDEX; 72 devoff = XLP_IO_UART1_OFFSET(0);
72 case PIC_PCIE_LINK_0_IRQ: 73 break;
73 return PIC_IRT_PCIE_LINK_0_INDEX;
74 case PIC_PCIE_LINK_1_IRQ:
75 return PIC_IRT_PCIE_LINK_1_INDEX;
76 case PIC_PCIE_LINK_2_IRQ:
77 return PIC_IRT_PCIE_LINK_2_INDEX;
78 case PIC_PCIE_LINK_3_IRQ:
79 return PIC_IRT_PCIE_LINK_3_INDEX;
80 case PIC_EHCI_0_IRQ: 74 case PIC_EHCI_0_IRQ:
81 return PIC_IRT_EHCI_0_INDEX; 75 devoff = XLP_IO_USB_EHCI0_OFFSET(0);
76 break;
82 case PIC_EHCI_1_IRQ: 77 case PIC_EHCI_1_IRQ:
83 return PIC_IRT_EHCI_1_INDEX; 78 devoff = XLP_IO_USB_EHCI1_OFFSET(0);
79 break;
84 case PIC_OHCI_0_IRQ: 80 case PIC_OHCI_0_IRQ:
85 return PIC_IRT_OHCI_0_INDEX; 81 devoff = XLP_IO_USB_OHCI0_OFFSET(0);
82 break;
86 case PIC_OHCI_1_IRQ: 83 case PIC_OHCI_1_IRQ:
87 return PIC_IRT_OHCI_1_INDEX; 84 devoff = XLP_IO_USB_OHCI1_OFFSET(0);
85 break;
88 case PIC_OHCI_2_IRQ: 86 case PIC_OHCI_2_IRQ:
89 return PIC_IRT_OHCI_2_INDEX; 87 devoff = XLP_IO_USB_OHCI2_OFFSET(0);
88 break;
90 case PIC_OHCI_3_IRQ: 89 case PIC_OHCI_3_IRQ:
91 return PIC_IRT_OHCI_3_INDEX; 90 devoff = XLP_IO_USB_OHCI3_OFFSET(0);
91 break;
92 case PIC_MMC_IRQ: 92 case PIC_MMC_IRQ:
93 return PIC_IRT_MMC_INDEX; 93 devoff = XLP_IO_SD_OFFSET(0);
94 break;
94 case PIC_I2C_0_IRQ: 95 case PIC_I2C_0_IRQ:
95 return PIC_IRT_I2C_0_INDEX; 96 devoff = XLP_IO_I2C0_OFFSET(0);
97 break;
96 case PIC_I2C_1_IRQ: 98 case PIC_I2C_1_IRQ:
97 return PIC_IRT_I2C_1_INDEX; 99 devoff = XLP_IO_I2C1_OFFSET(0);
100 break;
98 default: 101 default:
99 return -1; 102 devoff = 0;
103 break;
100 } 104 }
105
106 if (devoff != 0) {
107 pcibase = nlm_pcicfg_base(devoff);
108 irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
109 /* HW bug, I2C 1 irt entry is off by one */
110 if (irq == PIC_I2C_1_IRQ)
111 irt = irt + 1;
112 } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
113 /* HW bug, PCI IRT entries are bad on early silicon, fix */
114 irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
115 } else {
116 irt = -1;
117 }
118 return irt;
101} 119}
102 120
103unsigned int nlm_get_core_frequency(int node, int core) 121unsigned int nlm_get_core_frequency(int node, int core)
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 4894d62043ac..af319143b591 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -56,7 +56,7 @@ uint64_t nlm_io_base;
56struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; 56struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
57cpumask_t nlm_cpumask = CPU_MASK_CPU0; 57cpumask_t nlm_cpumask = CPU_MASK_CPU0;
58unsigned int nlm_threads_per_core; 58unsigned int nlm_threads_per_core;
59extern u32 __dtb_start[]; 59extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[];
60 60
61static void nlm_linux_exit(void) 61static void nlm_linux_exit(void)
62{ 62{
@@ -82,8 +82,24 @@ void __init plat_mem_setup(void)
82 * 64-bit, so convert pointer. 82 * 64-bit, so convert pointer.
83 */ 83 */
84 fdtp = (void *)(long)fw_arg0; 84 fdtp = (void *)(long)fw_arg0;
85 if (!fdtp) 85 if (!fdtp) {
86 fdtp = __dtb_start; 86 switch (current_cpu_data.processor_id & 0xff00) {
87#ifdef CONFIG_DT_XLP_SVP
88 case PRID_IMP_NETLOGIC_XLP3XX:
89 fdtp = __dtb_xlp_svp_begin;
90 break;
91#endif
92#ifdef CONFIG_DT_XLP_EVP
93 case PRID_IMP_NETLOGIC_XLP8XX:
94 fdtp = __dtb_xlp_evp_begin;
95 break;
96#endif
97 default:
98 /* Pick a built-in if any, and hope for the best */
99 fdtp = __dtb_start;
100 break;
101 }
102 }
87 fdtp = phys_to_virt(__pa(fdtp)); 103 fdtp = phys_to_virt(__pa(fdtp));
88 early_init_devtree(fdtp); 104 early_init_devtree(fdtp);
89} 105}
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index 1d0b66c62fd1..9c401dd78337 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -42,7 +42,30 @@
42#include <asm/netlogic/haldefs.h> 42#include <asm/netlogic/haldefs.h>
43#include <asm/netlogic/xlp-hal/iomap.h> 43#include <asm/netlogic/xlp-hal/iomap.h>
44#include <asm/netlogic/xlp-hal/xlp.h> 44#include <asm/netlogic/xlp-hal/xlp.h>
45#include <asm/netlogic/xlp-hal/usb.h> 45
46/*
47 * USB glue logic registers, used only during initialization
48 */
49#define USB_CTL_0 0x01
50#define USB_PHY_0 0x0A
51#define USB_PHY_RESET 0x01
52#define USB_PHY_PORT_RESET_0 0x10
53#define USB_PHY_PORT_RESET_1 0x20
54#define USB_CONTROLLER_RESET 0x01
55#define USB_INT_STATUS 0x0E
56#define USB_INT_EN 0x0F
57#define USB_PHY_INTERRUPT_EN 0x01
58#define USB_OHCI_INTERRUPT_EN 0x02
59#define USB_OHCI_INTERRUPT1_EN 0x04
60#define USB_OHCI_INTERRUPT2_EN 0x08
61#define USB_CTRL_INTERRUPT_EN 0x10
62
63#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
64#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
65#define nlm_get_usb_pcibase(node, inst) \
66 nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
67#define nlm_get_usb_regbase(node, inst) \
68 (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
46 69
47static void nlm_usb_intr_en(int node, int port) 70static void nlm_usb_intr_en(int node, int port)
48{ 71{
@@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev)
99 dev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 122 dev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
100 switch (dev->devfn) { 123 switch (dev->devfn) {
101 case 0x10: 124 case 0x10:
102 dev->irq = PIC_EHCI_0_IRQ; 125 dev->irq = PIC_EHCI_0_IRQ;
103 break; 126 break;
104 case 0x11: 127 case 0x11:
105 dev->irq = PIC_OHCI_0_IRQ; 128 dev->irq = PIC_OHCI_0_IRQ;
106 break; 129 break;
107 case 0x12: 130 case 0x12:
108 dev->irq = PIC_OHCI_1_IRQ; 131 dev->irq = PIC_OHCI_1_IRQ;
109 break; 132 break;
110 case 0x13: 133 case 0x13:
111 dev->irq = PIC_EHCI_1_IRQ; 134 dev->irq = PIC_EHCI_1_IRQ;
112 break; 135 break;
113 case 0x14: 136 case 0x14:
114 dev->irq = PIC_OHCI_2_IRQ; 137 dev->irq = PIC_OHCI_2_IRQ;
115 break; 138 break;
116 case 0x15: 139 case 0x15:
117 dev->irq = PIC_OHCI_3_IRQ; 140 dev->irq = PIC_OHCI_3_IRQ;
118 break; 141 break;
119 } 142 }
120} 143}
121DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI, 144DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1fd361462c03..e4b1140cdae0 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -41,7 +41,7 @@ static int (*save_perf_irq)(void);
41 * first hardware thread in the core for setup and init. 41 * first hardware thread in the core for setup and init.
42 * Skip CPUs with non-zero hardware thread id (4 hwt per core) 42 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
43 */ 43 */
44#ifdef CONFIG_CPU_XLR 44#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
45#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0) 45#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
46#else 46#else
47#define oprofile_skip_cpu(c) 0 47#define oprofile_skip_cpu(c) 0
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 412ec025cf55..18517dd0f709 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
366 if (!res) 366 if (!res)
367 return -EINVAL; 367 return -EINVAL;
368 368
369 apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res); 369 apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
370 if (!apc->cfg_base) 370 if (IS_ERR(apc->cfg_base))
371 return -ENOMEM; 371 return PTR_ERR(apc->cfg_base);
372 372
373 apc->irq = platform_get_irq(pdev, 0); 373 apc->irq = platform_get_irq(pdev, 0);
374 if (apc->irq < 0) 374 if (apc->irq < 0)
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 8a0700d448fe..65ec032fa0b4 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev)
365 if (!res) 365 if (!res)
366 return -EINVAL; 366 return -EINVAL;
367 367
368 apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res); 368 apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
369 if (apc->ctrl_base == NULL) 369 if (IS_ERR(apc->ctrl_base))
370 return -EBUSY; 370 return PTR_ERR(apc->ctrl_base);
371 371
372 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); 372 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
373 if (!res) 373 if (!res)
374 return -EINVAL; 374 return -EINVAL;
375 375
376 apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res); 376 apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
377 if (!apc->devcfg_base) 377 if (IS_ERR(apc->devcfg_base))
378 return -EBUSY; 378 return PTR_ERR(apc->devcfg_base);
379 379
380 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base"); 380 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
381 if (!res) 381 if (!res)
382 return -EINVAL; 382 return -EINVAL;
383 383
384 apc->crp_base = devm_request_and_ioremap(&pdev->dev, res); 384 apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
385 if (apc->crp_base == NULL) 385 if (IS_ERR(apc->crp_base))
386 return -EBUSY; 386 return PTR_ERR(apc->crp_base);
387 387
388 apc->irq = platform_get_irq(pdev, 0); 388 apc->irq = platform_get_irq(pdev, 0);
389 if (apc->irq < 0) 389 if (apc->irq < 0)
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index 88e781c6b5ba..2eb954239bc5 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -121,11 +121,17 @@ void __iomem *pci_iospace_start;
121static void __init bcm63xx_reset_pcie(void) 121static void __init bcm63xx_reset_pcie(void)
122{ 122{
123 u32 val; 123 u32 val;
124 u32 reg;
124 125
125 /* enable SERDES */ 126 /* enable SERDES */
126 val = bcm_misc_readl(MISC_SERDES_CTRL_REG); 127 if (BCMCPU_IS_6328())
128 reg = MISC_SERDES_CTRL_6328_REG;
129 else
130 reg = MISC_SERDES_CTRL_6362_REG;
131
132 val = bcm_misc_readl(reg);
127 val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN; 133 val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
128 bcm_misc_writel(val, MISC_SERDES_CTRL_REG); 134 bcm_misc_writel(val, reg);
129 135
130 /* reset the PCIe core */ 136 /* reset the PCIe core */
131 bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1); 137 bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
@@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void)
330 336
331 switch (bcm63xx_get_cpu_id()) { 337 switch (bcm63xx_get_cpu_id()) {
332 case BCM6328_CPU_ID: 338 case BCM6328_CPU_ID:
339 case BCM6362_CPU_ID:
333 return bcm63xx_register_pcie(); 340 return bcm63xx_register_pcie();
334 case BCM6348_CPU_ID: 341 case BCM6348_CPU_ID:
335 case BCM6358_CPU_ID: 342 case BCM6358_CPU_ID:
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
index 5bd9d8f468cc..a01baff52cae 100644
--- a/arch/mips/powertv/init.c
+++ b/arch/mips/powertv/init.c
@@ -29,10 +29,11 @@
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30#include <asm/traps.h> 30#include <asm/traps.h>
31 31
32#include <asm/mips-boards/prom.h>
33#include <asm/mips-boards/generic.h> 32#include <asm/mips-boards/generic.h>
34#include <asm/mach-powertv/asic.h> 33#include <asm/mach-powertv/asic.h>
35 34
35#include "init.h"
36
36static int *_prom_envp; 37static int *_prom_envp;
37unsigned long _prom_memsize; 38unsigned long _prom_memsize;
38 39
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
index b194c34ca966..c1a8bd0dbe4b 100644
--- a/arch/mips/powertv/init.h
+++ b/arch/mips/powertv/init.h
@@ -23,4 +23,6 @@
23#ifndef _POWERTV_INIT_H 23#ifndef _POWERTV_INIT_H
24#define _POWERTV_INIT_H 24#define _POWERTV_INIT_H
25extern unsigned long _prom_memsize; 25extern unsigned long _prom_memsize;
26extern void prom_meminit(void);
27extern char *prom_getenv(char *name);
26#endif 28#endif
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index 6e5f1bdc59b5..bc2f3ca22b41 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -29,7 +29,6 @@
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/sections.h> 30#include <asm/sections.h>
31 31
32#include <asm/mips-boards/prom.h>
33#include <asm/mach-powertv/asic.h> 32#include <asm/mach-powertv/asic.h>
34#include <asm/mach-powertv/ioremap.h> 33#include <asm/mach-powertv/ioremap.h>
35 34
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
index 820b8480f222..24689bff1039 100644
--- a/arch/mips/powertv/powertv_setup.c
+++ b/arch/mips/powertv/powertv_setup.c
@@ -31,7 +31,6 @@
31#include <asm/bootinfo.h> 31#include <asm/bootinfo.h>
32#include <asm/irq.h> 32#include <asm/irq.h>
33#include <asm/mips-boards/generic.h> 33#include <asm/mips-boards/generic.h>
34#include <asm/mips-boards/prom.h>
35#include <asm/dma.h> 34#include <asm/dma.h>
36#include <asm/asm.h> 35#include <asm/asm.h>
37#include <asm/traps.h> 36#include <asm/traps.h>
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index a0b0197cab0a..026e823d871d 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -6,12 +6,23 @@ choice
6 help 6 help
7 Select Ralink MIPS SoC type. 7 Select Ralink MIPS SoC type.
8 8
9 config SOC_RT288X
10 bool "RT288x"
11
9 config SOC_RT305X 12 config SOC_RT305X
10 bool "RT305x" 13 bool "RT305x"
11 select USB_ARCH_HAS_HCD 14 select USB_ARCH_HAS_HCD
12 select USB_ARCH_HAS_OHCI 15 select USB_ARCH_HAS_OHCI
13 select USB_ARCH_HAS_EHCI 16 select USB_ARCH_HAS_EHCI
14 17
18 config SOC_RT3883
19 bool "RT3883"
20 select USB_ARCH_HAS_OHCI
21 select USB_ARCH_HAS_EHCI
22
23 config SOC_MT7620
24 bool "MT7620"
25
15endchoice 26endchoice
16 27
17choice 28choice
@@ -23,10 +34,22 @@ choice
23 config DTB_RT_NONE 34 config DTB_RT_NONE
24 bool "None" 35 bool "None"
25 36
37 config DTB_RT2880_EVAL
38 bool "RT2880 eval kit"
39 depends on SOC_RT288X
40
26 config DTB_RT305X_EVAL 41 config DTB_RT305X_EVAL
27 bool "RT305x eval kit" 42 bool "RT305x eval kit"
28 depends on SOC_RT305X 43 depends on SOC_RT305X
29 44
45 config DTB_RT3883_EVAL
46 bool "RT3883 eval kit"
47 depends on SOC_RT3883
48
49 config DTB_MT7620A_EVAL
50 bool "MT7620A eval kit"
51 depends on SOC_MT7620
52
30endchoice 53endchoice
31 54
32endif 55endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 939757f0e71f..38cf1a880aaa 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -8,7 +8,10 @@
8 8
9obj-y := prom.o of.o reset.o clk.o irq.o 9obj-y := prom.o of.o reset.o clk.o irq.o
10 10
11obj-$(CONFIG_SOC_RT288X) += rt288x.o
11obj-$(CONFIG_SOC_RT305X) += rt305x.o 12obj-$(CONFIG_SOC_RT305X) += rt305x.o
13obj-$(CONFIG_SOC_RT3883) += rt3883.o
14obj-$(CONFIG_SOC_MT7620) += mt7620.o
12 15
13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 16obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
14 17
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
index 6babd65765e6..cda4b6645c50 100644
--- a/arch/mips/ralink/Platform
+++ b/arch/mips/ralink/Platform
@@ -5,6 +5,24 @@ core-$(CONFIG_RALINK) += arch/mips/ralink/
5cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink 5cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink
6 6
7# 7#
8# Ralink RT288x
9#
10load-$(CONFIG_SOC_RT288X) += 0xffffffff88000000
11cflags-$(CONFIG_SOC_RT288X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
12
13#
8# Ralink RT305x 14# Ralink RT305x
9# 15#
10load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000 16load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000
17cflags-$(CONFIG_SOC_RT305X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
18
19#
20# Ralink RT3883
21#
22load-$(CONFIG_SOC_RT3883) += 0xffffffff80000000
23cflags-$(CONFIG_SOC_RT3883) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
24
25#
26# Ralink MT7620
27#
28load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 300990313e1b..83144c3fc5ac 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -22,13 +22,22 @@ struct ralink_pinmux {
22 struct ralink_pinmux_grp *mode; 22 struct ralink_pinmux_grp *mode;
23 struct ralink_pinmux_grp *uart; 23 struct ralink_pinmux_grp *uart;
24 int uart_shift; 24 int uart_shift;
25 u32 uart_mask;
25 void (*wdt_reset)(void); 26 void (*wdt_reset)(void);
27 struct ralink_pinmux_grp *pci;
28 int pci_shift;
29 u32 pci_mask;
26}; 30};
27extern struct ralink_pinmux gpio_pinmux; 31extern struct ralink_pinmux rt_gpio_pinmux;
28 32
29struct ralink_soc_info { 33struct ralink_soc_info {
30 unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; 34 unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
31 unsigned char *compatible; 35 unsigned char *compatible;
36
37 unsigned long mem_base;
38 unsigned long mem_size;
39 unsigned long mem_size_min;
40 unsigned long mem_size_max;
32}; 41};
33extern struct ralink_soc_info soc_info; 42extern struct ralink_soc_info soc_info;
34 43
diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile
index 1a69fb300955..18194fa93e80 100644
--- a/arch/mips/ralink/dts/Makefile
+++ b/arch/mips/ralink/dts/Makefile
@@ -1 +1,4 @@
1obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o
1obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o 2obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
3obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o
4obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o
diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi
new file mode 100644
index 000000000000..08bf24fefe9f
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a.dtsi
@@ -0,0 +1,58 @@
1/ {
2 #address-cells = <1>;
3 #size-cells = <1>;
4 compatible = "ralink,mtk7620a-soc";
5
6 cpus {
7 cpu@0 {
8 compatible = "mips,mips24KEc";
9 };
10 };
11
12 cpuintc: cpuintc@0 {
13 #address-cells = <0>;
14 #interrupt-cells = <1>;
15 interrupt-controller;
16 compatible = "mti,cpu-interrupt-controller";
17 };
18
19 palmbus@10000000 {
20 compatible = "palmbus";
21 reg = <0x10000000 0x200000>;
22 ranges = <0x0 0x10000000 0x1FFFFF>;
23
24 #address-cells = <1>;
25 #size-cells = <1>;
26
27 sysc@0 {
28 compatible = "ralink,mt7620a-sysc";
29 reg = <0x0 0x100>;
30 };
31
32 intc: intc@200 {
33 compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
34 reg = <0x200 0x100>;
35
36 interrupt-controller;
37 #interrupt-cells = <1>;
38
39 interrupt-parent = <&cpuintc>;
40 interrupts = <2>;
41 };
42
43 memc@300 {
44 compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
45 reg = <0x300 0x100>;
46 };
47
48 uartlite@c00 {
49 compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
50 reg = <0xc00 0x100>;
51
52 interrupt-parent = <&intc>;
53 interrupts = <12>;
54
55 reg-shift = <2>;
56 };
57 };
58};
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
new file mode 100644
index 000000000000..35eb874ab7f1
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a_eval.dts
@@ -0,0 +1,16 @@
1/dts-v1/;
2
3/include/ "mt7620a.dtsi"
4
5/ {
6 compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
7 model = "Ralink MT7620A evaluation board";
8
9 memory@0 {
10 reg = <0x0 0x2000000>;
11 };
12
13 chosen {
14 bootargs = "console=ttyS0,57600";
15 };
16};
diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi
new file mode 100644
index 000000000000..182afde2f2e1
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880.dtsi
@@ -0,0 +1,58 @@
1/ {
2 #address-cells = <1>;
3 #size-cells = <1>;
4 compatible = "ralink,rt2880-soc";
5
6 cpus {
7 cpu@0 {
8 compatible = "mips,mips4KEc";
9 };
10 };
11
12 cpuintc: cpuintc@0 {
13 #address-cells = <0>;
14 #interrupt-cells = <1>;
15 interrupt-controller;
16 compatible = "mti,cpu-interrupt-controller";
17 };
18
19 palmbus@300000 {
20 compatible = "palmbus";
21 reg = <0x300000 0x200000>;
22 ranges = <0x0 0x300000 0x1FFFFF>;
23
24 #address-cells = <1>;
25 #size-cells = <1>;
26
27 sysc@0 {
28 compatible = "ralink,rt2880-sysc";
29 reg = <0x0 0x100>;
30 };
31
32 intc: intc@200 {
33 compatible = "ralink,rt2880-intc";
34 reg = <0x200 0x100>;
35
36 interrupt-controller;
37 #interrupt-cells = <1>;
38
39 interrupt-parent = <&cpuintc>;
40 interrupts = <2>;
41 };
42
43 memc@300 {
44 compatible = "ralink,rt2880-memc";
45 reg = <0x300 0x100>;
46 };
47
48 uartlite@c00 {
49 compatible = "ralink,rt2880-uart", "ns16550a";
50 reg = <0xc00 0x100>;
51
52 interrupt-parent = <&intc>;
53 interrupts = <8>;
54
55 reg-shift = <2>;
56 };
57 };
58};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
new file mode 100644
index 000000000000..322d7002595b
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880_eval.dts
@@ -0,0 +1,46 @@
1/dts-v1/;
2
3/include/ "rt2880.dtsi"
4
5/ {
6 compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
7 model = "Ralink RT2880 evaluation board";
8
9 memory@0 {
10 reg = <0x8000000 0x2000000>;
11 };
12
13 chosen {
14 bootargs = "console=ttyS0,57600";
15 };
16
17 cfi@1f000000 {
18 compatible = "cfi-flash";
19 reg = <0x1f000000 0x400000>;
20
21 bank-width = <2>;
22 device-width = <2>;
23 #address-cells = <1>;
24 #size-cells = <1>;
25
26 partition@0 {
27 label = "uboot";
28 reg = <0x0 0x30000>;
29 read-only;
30 };
31 partition@30000 {
32 label = "uboot-env";
33 reg = <0x30000 0x10000>;
34 read-only;
35 };
36 partition@40000 {
37 label = "calibration";
38 reg = <0x40000 0x10000>;
39 read-only;
40 };
41 partition@50000 {
42 label = "linux";
43 reg = <0x50000 0x3b0000>;
44 };
45 };
46};
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
index 069d0660e1dd..ef7da1e227e6 100644
--- a/arch/mips/ralink/dts/rt3050.dtsi
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -1,7 +1,7 @@
1/ { 1/ {
2 #address-cells = <1>; 2 #address-cells = <1>;
3 #size-cells = <1>; 3 #size-cells = <1>;
4 compatible = "ralink,rt3050-soc", "ralink,rt3052-soc"; 4 compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
5 5
6 cpus { 6 cpus {
7 cpu@0 { 7 cpu@0 {
@@ -9,10 +9,6 @@
9 }; 9 };
10 }; 10 };
11 11
12 chosen {
13 bootargs = "console=ttyS0,57600 init=/init";
14 };
15
16 cpuintc: cpuintc@0 { 12 cpuintc: cpuintc@0 {
17 #address-cells = <0>; 13 #address-cells = <0>;
18 #interrupt-cells = <1>; 14 #interrupt-cells = <1>;
@@ -23,7 +19,7 @@
23 palmbus@10000000 { 19 palmbus@10000000 {
24 compatible = "palmbus"; 20 compatible = "palmbus";
25 reg = <0x10000000 0x200000>; 21 reg = <0x10000000 0x200000>;
26 ranges = <0x0 0x10000000 0x1FFFFF>; 22 ranges = <0x0 0x10000000 0x1FFFFF>;
27 23
28 #address-cells = <1>; 24 #address-cells = <1>;
29 #size-cells = <1>; 25 #size-cells = <1>;
@@ -33,11 +29,6 @@
33 reg = <0x0 0x100>; 29 reg = <0x0 0x100>;
34 }; 30 };
35 31
36 timer@100 {
37 compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
38 reg = <0x100 0x100>;
39 };
40
41 intc: intc@200 { 32 intc: intc@200 {
42 compatible = "ralink,rt3052-intc", "ralink,rt2880-intc"; 33 compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
43 reg = <0x200 0x100>; 34 reg = <0x200 0x100>;
@@ -54,45 +45,6 @@
54 reg = <0x300 0x100>; 45 reg = <0x300 0x100>;
55 }; 46 };
56 47
57 gpio0: gpio@600 {
58 compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
59 reg = <0x600 0x34>;
60
61 gpio-controller;
62 #gpio-cells = <2>;
63
64 ralink,ngpio = <24>;
65 ralink,regs = [ 00 04 08 0c
66 20 24 28 2c
67 30 34 ];
68 };
69
70 gpio1: gpio@638 {
71 compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
72 reg = <0x638 0x24>;
73
74 gpio-controller;
75 #gpio-cells = <2>;
76
77 ralink,ngpio = <16>;
78 ralink,regs = [ 00 04 08 0c
79 10 14 18 1c
80 20 24 ];
81 };
82
83 gpio2: gpio@660 {
84 compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
85 reg = <0x660 0x24>;
86
87 gpio-controller;
88 #gpio-cells = <2>;
89
90 ralink,ngpio = <12>;
91 ralink,regs = [ 00 04 08 0c
92 10 14 18 1c
93 20 24 ];
94 };
95
96 uartlite@c00 { 48 uartlite@c00 {
97 compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a"; 49 compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
98 reg = <0xc00 0x100>; 50 reg = <0xc00 0x100>;
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index 148a590bc419..c18c9a84f4c4 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -1,10 +1,8 @@
1/dts-v1/; 1/dts-v1/;
2 2
3/include/ "rt3050.dtsi" 3#include "rt3050.dtsi"
4 4
5/ { 5/ {
6 #address-cells = <1>;
7 #size-cells = <1>;
8 compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc"; 6 compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
9 model = "Ralink RT3052 evaluation board"; 7 model = "Ralink RT3052 evaluation board";
10 8
@@ -12,12 +10,8 @@
12 reg = <0x0 0x2000000>; 10 reg = <0x0 0x2000000>;
13 }; 11 };
14 12
15 palmbus@10000000 { 13 chosen {
16 sysc@0 { 14 bootargs = "console=ttyS0,57600";
17 ralink,pinmmux = "uartlite", "spi";
18 ralink,uartmux = "gpio";
19 ralink,wdtmux = <0>;
20 };
21 }; 15 };
22 16
23 cfi@1f000000 { 17 cfi@1f000000 {
diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi
new file mode 100644
index 000000000000..3b131dd0d5ac
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883.dtsi
@@ -0,0 +1,58 @@
1/ {
2 #address-cells = <1>;
3 #size-cells = <1>;
4 compatible = "ralink,rt3883-soc";
5
6 cpus {
7 cpu@0 {
8 compatible = "mips,mips74Kc";
9 };
10 };
11
12 cpuintc: cpuintc@0 {
13 #address-cells = <0>;
14 #interrupt-cells = <1>;
15 interrupt-controller;
16 compatible = "mti,cpu-interrupt-controller";
17 };
18
19 palmbus@10000000 {
20 compatible = "palmbus";
21 reg = <0x10000000 0x200000>;
22 ranges = <0x0 0x10000000 0x1FFFFF>;
23
24 #address-cells = <1>;
25 #size-cells = <1>;
26
27 sysc@0 {
28 compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
29 reg = <0x0 0x100>;
30 };
31
32 intc: intc@200 {
33 compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
34 reg = <0x200 0x100>;
35
36 interrupt-controller;
37 #interrupt-cells = <1>;
38
39 interrupt-parent = <&cpuintc>;
40 interrupts = <2>;
41 };
42
43 memc@300 {
44 compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
45 reg = <0x300 0x100>;
46 };
47
48 uartlite@c00 {
49 compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
50 reg = <0xc00 0x100>;
51
52 interrupt-parent = <&intc>;
53 interrupts = <12>;
54
55 reg-shift = <2>;
56 };
57 };
58};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
new file mode 100644
index 000000000000..2fa6b330bf4f
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883_eval.dts
@@ -0,0 +1,16 @@
1/dts-v1/;
2
3/include/ "rt3883.dtsi"
4
5/ {
6 compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
7 model = "Ralink RT3883 evaluation board";
8
9 memory@0 {
10 reg = <0x0 0x2000000>;
11 };
12
13 chosen {
14 bootargs = "console=ttyS0,57600";
15 };
16};
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index c4ae47eb24ab..b46d0419d09b 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -11,7 +11,11 @@
11 11
12#include <asm/addrspace.h> 12#include <asm/addrspace.h>
13 13
14#ifdef CONFIG_SOC_RT288X
15#define EARLY_UART_BASE 0x300c00
16#else
14#define EARLY_UART_BASE 0x10000c00 17#define EARLY_UART_BASE 0x10000c00
18#endif
15 19
16#define UART_REG_RX 0x00 20#define UART_REG_RX 0x00
17#define UART_REG_TX 0x04 21#define UART_REG_TX 0x04
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 6d054c5ec9ab..320b1f1043ff 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -31,6 +31,7 @@
31#define INTC_INT_GLOBAL BIT(31) 31#define INTC_INT_GLOBAL BIT(31)
32 32
33#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2) 33#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
34#define RALINK_CPU_IRQ_PCI (MIPS_CPU_IRQ_BASE + 4)
34#define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5) 35#define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5)
35#define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6) 36#define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6)
36#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7) 37#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
@@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void)
104 else if (pending & STATUSF_IP6) 105 else if (pending & STATUSF_IP6)
105 do_IRQ(RALINK_CPU_IRQ_WIFI); 106 do_IRQ(RALINK_CPU_IRQ_WIFI);
106 107
108 else if (pending & STATUSF_IP4)
109 do_IRQ(RALINK_CPU_IRQ_PCI);
110
107 else if (pending & STATUSF_IP2) 111 else if (pending & STATUSF_IP2)
108 do_IRQ(RALINK_CPU_IRQ_INTC); 112 do_IRQ(RALINK_CPU_IRQ_INTC);
109 113
@@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node,
162 irq_set_chained_handler(irq, ralink_intc_irq_handler); 166 irq_set_chained_handler(irq, ralink_intc_irq_handler);
163 irq_set_handler_data(irq, domain); 167 irq_set_handler_data(irq, domain);
164 168
169 /* tell the kernel which irq is used for performance monitoring */
165 cp0_perfcount_irq = irq_create_mapping(domain, 9); 170 cp0_perfcount_irq = irq_create_mapping(domain, 9);
166 171
167 return 0; 172 return 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644
index 000000000000..0018b1a661f6
--- /dev/null
+++ b/arch/mips/ralink/mt7620.c
@@ -0,0 +1,234 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Parts of this file are based on Ralink's 2.6.21 BSP
7 *
8 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
9 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
10 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/module.h>
16
17#include <asm/mipsregs.h>
18#include <asm/mach-ralink/ralink_regs.h>
19#include <asm/mach-ralink/mt7620.h>
20
21#include "common.h"
22
23/* does the board have sdram or ddram */
24static int dram_type;
25
26/* the pll dividers */
27static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 };
28
29static struct ralink_pinmux_grp mode_mux[] = {
30 {
31 .name = "i2c",
32 .mask = MT7620_GPIO_MODE_I2C,
33 .gpio_first = 1,
34 .gpio_last = 2,
35 }, {
36 .name = "spi",
37 .mask = MT7620_GPIO_MODE_SPI,
38 .gpio_first = 3,
39 .gpio_last = 6,
40 }, {
41 .name = "uartlite",
42 .mask = MT7620_GPIO_MODE_UART1,
43 .gpio_first = 15,
44 .gpio_last = 16,
45 }, {
46 .name = "wdt",
47 .mask = MT7620_GPIO_MODE_WDT,
48 .gpio_first = 17,
49 .gpio_last = 17,
50 }, {
51 .name = "mdio",
52 .mask = MT7620_GPIO_MODE_MDIO,
53 .gpio_first = 22,
54 .gpio_last = 23,
55 }, {
56 .name = "rgmii1",
57 .mask = MT7620_GPIO_MODE_RGMII1,
58 .gpio_first = 24,
59 .gpio_last = 35,
60 }, {
61 .name = "spi refclk",
62 .mask = MT7620_GPIO_MODE_SPI_REF_CLK,
63 .gpio_first = 37,
64 .gpio_last = 39,
65 }, {
66 .name = "jtag",
67 .mask = MT7620_GPIO_MODE_JTAG,
68 .gpio_first = 40,
69 .gpio_last = 44,
70 }, {
71 /* shared lines with jtag */
72 .name = "ephy",
73 .mask = MT7620_GPIO_MODE_EPHY,
74 .gpio_first = 40,
75 .gpio_last = 44,
76 }, {
77 .name = "nand",
78 .mask = MT7620_GPIO_MODE_JTAG,
79 .gpio_first = 45,
80 .gpio_last = 59,
81 }, {
82 .name = "rgmii2",
83 .mask = MT7620_GPIO_MODE_RGMII2,
84 .gpio_first = 60,
85 .gpio_last = 71,
86 }, {
87 .name = "wled",
88 .mask = MT7620_GPIO_MODE_WLED,
89 .gpio_first = 72,
90 .gpio_last = 72,
91 }, {0}
92};
93
94static struct ralink_pinmux_grp uart_mux[] = {
95 {
96 .name = "uartf",
97 .mask = MT7620_GPIO_MODE_UARTF,
98 .gpio_first = 7,
99 .gpio_last = 14,
100 }, {
101 .name = "pcm uartf",
102 .mask = MT7620_GPIO_MODE_PCM_UARTF,
103 .gpio_first = 7,
104 .gpio_last = 14,
105 }, {
106 .name = "pcm i2s",
107 .mask = MT7620_GPIO_MODE_PCM_I2S,
108 .gpio_first = 7,
109 .gpio_last = 14,
110 }, {
111 .name = "i2s uartf",
112 .mask = MT7620_GPIO_MODE_I2S_UARTF,
113 .gpio_first = 7,
114 .gpio_last = 14,
115 }, {
116 .name = "pcm gpio",
117 .mask = MT7620_GPIO_MODE_PCM_GPIO,
118 .gpio_first = 11,
119 .gpio_last = 14,
120 }, {
121 .name = "gpio uartf",
122 .mask = MT7620_GPIO_MODE_GPIO_UARTF,
123 .gpio_first = 7,
124 .gpio_last = 10,
125 }, {
126 .name = "gpio i2s",
127 .mask = MT7620_GPIO_MODE_GPIO_I2S,
128 .gpio_first = 7,
129 .gpio_last = 10,
130 }, {
131 .name = "gpio",
132 .mask = MT7620_GPIO_MODE_GPIO,
133 }, {0}
134};
135
136struct ralink_pinmux rt_gpio_pinmux = {
137 .mode = mode_mux,
138 .uart = uart_mux,
139 .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
140 .uart_mask = MT7620_GPIO_MODE_UART0_MASK,
141};
142
143void __init ralink_clk_init(void)
144{
145 unsigned long cpu_rate, sys_rate;
146 u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
147 u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
148 u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK;
149 u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK;
150
151 if (cpu_clk) {
152 cpu_rate = 480000000;
153 } else if (!swconfig) {
154 cpu_rate = 600000000;
155 } else {
156 u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO;
157 u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO;
158
159 cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000;
160 }
161
162 if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
163 sys_rate = cpu_rate / 4;
164 else
165 sys_rate = cpu_rate / 3;
166
167 ralink_clk_add("cpu", cpu_rate);
168 ralink_clk_add("10000100.timer", 40000000);
169 ralink_clk_add("10000500.uart", 40000000);
170 ralink_clk_add("10000c00.uartlite", 40000000);
171}
172
173void __init ralink_of_remap(void)
174{
175 rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
176 rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
177
178 if (!rt_sysc_membase || !rt_memc_membase)
179 panic("Failed to remap core resources");
180}
181
182void prom_soc_init(struct ralink_soc_info *soc_info)
183{
184 void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
185 unsigned char *name = NULL;
186 u32 n0;
187 u32 n1;
188 u32 rev;
189 u32 cfg0;
190
191 n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
192 n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
193
194 if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
195 name = "MT7620N";
196 soc_info->compatible = "ralink,mt7620n-soc";
197 } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
198 name = "MT7620A";
199 soc_info->compatible = "ralink,mt7620a-soc";
200 } else {
201 panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
202 }
203
204 rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
205
206 snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
207 "Ralink %s ver:%u eco:%u",
208 name,
209 (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
210 (rev & CHIP_REV_ECO_MASK));
211
212 cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
213 dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
214
215 switch (dram_type) {
216 case SYSCFG0_DRAM_TYPE_SDRAM:
217 soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
218 soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
219 break;
220
221 case SYSCFG0_DRAM_TYPE_DDR1:
222 soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
223 soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
224 break;
225
226 case SYSCFG0_DRAM_TYPE_DDR2:
227 soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
228 soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
229 break;
230 default:
231 BUG();
232 }
233 soc_info->mem_base = MT7620_DRAM_BASE;
234}
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 4165e70775be..fb1569580def 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -11,6 +11,7 @@
11#include <linux/io.h> 11#include <linux/io.h>
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sizes.h>
14#include <linux/of_fdt.h> 15#include <linux/of_fdt.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/bootmem.h> 17#include <linux/bootmem.h>
@@ -85,6 +86,14 @@ void __init plat_mem_setup(void)
85 * parsed resulting in our memory appearing 86 * parsed resulting in our memory appearing
86 */ 87 */
87 __dt_setup_arch(&__dtb_start); 88 __dt_setup_arch(&__dtb_start);
89
90 if (soc_info.mem_size)
91 add_memory_region(soc_info.mem_base, soc_info.mem_size,
92 BOOT_MEM_RAM);
93 else
94 detect_memory_region(soc_info.mem_base,
95 soc_info.mem_size_min * SZ_1M,
96 soc_info.mem_size_max * SZ_1M);
88} 97}
89 98
90static int __init plat_of_setup(void) 99static int __init plat_of_setup(void)
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644
index 000000000000..f87de1ab2198
--- /dev/null
+++ b/arch/mips/ralink/rt288x.c
@@ -0,0 +1,143 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Parts of this file are based on Ralink's 2.6.21 BSP
7 *
8 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
9 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
10 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/module.h>
16
17#include <asm/mipsregs.h>
18#include <asm/mach-ralink/ralink_regs.h>
19#include <asm/mach-ralink/rt288x.h>
20
21#include "common.h"
22
23static struct ralink_pinmux_grp mode_mux[] = {
24 {
25 .name = "i2c",
26 .mask = RT2880_GPIO_MODE_I2C,
27 .gpio_first = 1,
28 .gpio_last = 2,
29 }, {
30 .name = "spi",
31 .mask = RT2880_GPIO_MODE_SPI,
32 .gpio_first = 3,
33 .gpio_last = 6,
34 }, {
35 .name = "uartlite",
36 .mask = RT2880_GPIO_MODE_UART0,
37 .gpio_first = 7,
38 .gpio_last = 14,
39 }, {
40 .name = "jtag",
41 .mask = RT2880_GPIO_MODE_JTAG,
42 .gpio_first = 17,
43 .gpio_last = 21,
44 }, {
45 .name = "mdio",
46 .mask = RT2880_GPIO_MODE_MDIO,
47 .gpio_first = 22,
48 .gpio_last = 23,
49 }, {
50 .name = "sdram",
51 .mask = RT2880_GPIO_MODE_SDRAM,
52 .gpio_first = 24,
53 .gpio_last = 39,
54 }, {
55 .name = "pci",
56 .mask = RT2880_GPIO_MODE_PCI,
57 .gpio_first = 40,
58 .gpio_last = 71,
59 }, {0}
60};
61
62static void rt288x_wdt_reset(void)
63{
64 u32 t;
65
66 /* enable WDT reset output on pin SRAM_CS_N */
67 t = rt_sysc_r32(SYSC_REG_CLKCFG);
68 t |= CLKCFG_SRAM_CS_N_WDT;
69 rt_sysc_w32(t, SYSC_REG_CLKCFG);
70}
71
72struct ralink_pinmux rt_gpio_pinmux = {
73 .mode = mode_mux,
74 .wdt_reset = rt288x_wdt_reset,
75};
76
77void __init ralink_clk_init(void)
78{
79 unsigned long cpu_rate;
80 u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
81 t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
82
83 switch (t) {
84 case SYSTEM_CONFIG_CPUCLK_250:
85 cpu_rate = 250000000;
86 break;
87 case SYSTEM_CONFIG_CPUCLK_266:
88 cpu_rate = 266666667;
89 break;
90 case SYSTEM_CONFIG_CPUCLK_280:
91 cpu_rate = 280000000;
92 break;
93 case SYSTEM_CONFIG_CPUCLK_300:
94 cpu_rate = 300000000;
95 break;
96 }
97
98 ralink_clk_add("cpu", cpu_rate);
99 ralink_clk_add("300100.timer", cpu_rate / 2);
100 ralink_clk_add("300120.watchdog", cpu_rate / 2);
101 ralink_clk_add("300500.uart", cpu_rate / 2);
102 ralink_clk_add("300c00.uartlite", cpu_rate / 2);
103 ralink_clk_add("400000.ethernet", cpu_rate / 2);
104}
105
106void __init ralink_of_remap(void)
107{
108 rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
109 rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
110
111 if (!rt_sysc_membase || !rt_memc_membase)
112 panic("Failed to remap core resources");
113}
114
115void prom_soc_init(struct ralink_soc_info *soc_info)
116{
117 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
118 const char *name;
119 u32 n0;
120 u32 n1;
121 u32 id;
122
123 n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
124 n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
125 id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
126
127 if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
128 soc_info->compatible = "ralink,r2880-soc";
129 name = "RT2880";
130 } else {
131 panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
132 }
133
134 snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
135 "Ralink %s id:%u rev:%u",
136 name,
137 (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
138 (id & CHIP_ID_REV_MASK));
139
140 soc_info->mem_base = RT2880_SDRAM_BASE;
141 soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
142 soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
143}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index 0a4bbdcf59d9..ca7ee3a33790 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -22,7 +22,7 @@
22 22
23enum rt305x_soc_type rt305x_soc; 23enum rt305x_soc_type rt305x_soc;
24 24
25struct ralink_pinmux_grp mode_mux[] = { 25static struct ralink_pinmux_grp mode_mux[] = {
26 { 26 {
27 .name = "i2c", 27 .name = "i2c",
28 .mask = RT305X_GPIO_MODE_I2C, 28 .mask = RT305X_GPIO_MODE_I2C,
@@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = {
61 }, {0} 61 }, {0}
62}; 62};
63 63
64struct ralink_pinmux_grp uart_mux[] = { 64static struct ralink_pinmux_grp uart_mux[] = {
65 { 65 {
66 .name = "uartf", 66 .name = "uartf",
67 .mask = RT305X_GPIO_MODE_UARTF, 67 .mask = RT305X_GPIO_MODE_UARTF,
@@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = {
91 .name = "gpio uartf", 91 .name = "gpio uartf",
92 .mask = RT305X_GPIO_MODE_GPIO_UARTF, 92 .mask = RT305X_GPIO_MODE_GPIO_UARTF,
93 .gpio_first = RT305X_GPIO_7, 93 .gpio_first = RT305X_GPIO_7,
94 .gpio_last = RT305X_GPIO_14, 94 .gpio_last = RT305X_GPIO_10,
95 }, { 95 }, {
96 .name = "gpio i2s", 96 .name = "gpio i2s",
97 .mask = RT305X_GPIO_MODE_GPIO_I2S, 97 .mask = RT305X_GPIO_MODE_GPIO_I2S,
98 .gpio_first = RT305X_GPIO_7, 98 .gpio_first = RT305X_GPIO_7,
99 .gpio_last = RT305X_GPIO_14, 99 .gpio_last = RT305X_GPIO_10,
100 }, { 100 }, {
101 .name = "gpio", 101 .name = "gpio",
102 .mask = RT305X_GPIO_MODE_GPIO, 102 .mask = RT305X_GPIO_MODE_GPIO,
103 }, {0} 103 }, {0}
104}; 104};
105 105
106void rt305x_wdt_reset(void) 106static void rt305x_wdt_reset(void)
107{ 107{
108 u32 t; 108 u32 t;
109 109
@@ -114,16 +114,53 @@ void rt305x_wdt_reset(void)
114 rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); 114 rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
115} 115}
116 116
117struct ralink_pinmux gpio_pinmux = { 117struct ralink_pinmux rt_gpio_pinmux = {
118 .mode = mode_mux, 118 .mode = mode_mux,
119 .uart = uart_mux, 119 .uart = uart_mux,
120 .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT, 120 .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
121 .uart_mask = RT305X_GPIO_MODE_UART0_MASK,
121 .wdt_reset = rt305x_wdt_reset, 122 .wdt_reset = rt305x_wdt_reset,
122}; 123};
123 124
125static unsigned long rt5350_get_mem_size(void)
126{
127 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
128 unsigned long ret;
129 u32 t;
130
131 t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
132 t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
133 RT5350_SYSCFG0_DRAM_SIZE_MASK;
134
135 switch (t) {
136 case RT5350_SYSCFG0_DRAM_SIZE_2M:
137 ret = 2;
138 break;
139 case RT5350_SYSCFG0_DRAM_SIZE_8M:
140 ret = 8;
141 break;
142 case RT5350_SYSCFG0_DRAM_SIZE_16M:
143 ret = 16;
144 break;
145 case RT5350_SYSCFG0_DRAM_SIZE_32M:
146 ret = 32;
147 break;
148 case RT5350_SYSCFG0_DRAM_SIZE_64M:
149 ret = 64;
150 break;
151 default:
152 panic("rt5350: invalid DRAM size: %u", t);
153 break;
154 }
155
156 return ret;
157}
158
124void __init ralink_clk_init(void) 159void __init ralink_clk_init(void)
125{ 160{
126 unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate; 161 unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
162 unsigned long wmac_rate = 40000000;
163
127 u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); 164 u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
128 165
129 if (soc_is_rt305x() || soc_is_rt3350()) { 166 if (soc_is_rt305x() || soc_is_rt3350()) {
@@ -176,11 +213,21 @@ void __init ralink_clk_init(void)
176 BUG(); 213 BUG();
177 } 214 }
178 215
216 if (soc_is_rt3352() || soc_is_rt5350()) {
217 u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
218
219 if (!(val & RT3352_CLKCFG0_XTAL_SEL))
220 wmac_rate = 20000000;
221 }
222
179 ralink_clk_add("cpu", cpu_rate); 223 ralink_clk_add("cpu", cpu_rate);
180 ralink_clk_add("10000b00.spi", sys_rate); 224 ralink_clk_add("10000b00.spi", sys_rate);
181 ralink_clk_add("10000100.timer", wdt_rate); 225 ralink_clk_add("10000100.timer", wdt_rate);
226 ralink_clk_add("10000120.watchdog", wdt_rate);
182 ralink_clk_add("10000500.uart", uart_rate); 227 ralink_clk_add("10000500.uart", uart_rate);
183 ralink_clk_add("10000c00.uartlite", uart_rate); 228 ralink_clk_add("10000c00.uartlite", uart_rate);
229 ralink_clk_add("10100000.ethernet", sys_rate);
230 ralink_clk_add("10180000.wmac", wmac_rate);
184} 231}
185 232
186void __init ralink_of_remap(void) 233void __init ralink_of_remap(void)
@@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
239 name, 286 name,
240 (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK, 287 (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
241 (id & CHIP_ID_REV_MASK)); 288 (id & CHIP_ID_REV_MASK));
289
290 soc_info->mem_base = RT305X_SDRAM_BASE;
291 if (soc_is_rt5350()) {
292 soc_info->mem_size = rt5350_get_mem_size();
293 } else if (soc_is_rt305x() || soc_is_rt3350()) {
294 soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
295 soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
296 } else if (soc_is_rt3352()) {
297 soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
298 soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
299 }
242} 300}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644
index 000000000000..b474ac284b83
--- /dev/null
+++ b/arch/mips/ralink/rt3883.c
@@ -0,0 +1,246 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Parts of this file are based on Ralink's 2.6.21 BSP
7 *
8 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
9 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
10 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/module.h>
16
17#include <asm/mipsregs.h>
18#include <asm/mach-ralink/ralink_regs.h>
19#include <asm/mach-ralink/rt3883.h>
20
21#include "common.h"
22
23static struct ralink_pinmux_grp mode_mux[] = {
24 {
25 .name = "i2c",
26 .mask = RT3883_GPIO_MODE_I2C,
27 .gpio_first = RT3883_GPIO_I2C_SD,
28 .gpio_last = RT3883_GPIO_I2C_SCLK,
29 }, {
30 .name = "spi",
31 .mask = RT3883_GPIO_MODE_SPI,
32 .gpio_first = RT3883_GPIO_SPI_CS0,
33 .gpio_last = RT3883_GPIO_SPI_MISO,
34 }, {
35 .name = "uartlite",
36 .mask = RT3883_GPIO_MODE_UART1,
37 .gpio_first = RT3883_GPIO_UART1_TXD,
38 .gpio_last = RT3883_GPIO_UART1_RXD,
39 }, {
40 .name = "jtag",
41 .mask = RT3883_GPIO_MODE_JTAG,
42 .gpio_first = RT3883_GPIO_JTAG_TDO,
43 .gpio_last = RT3883_GPIO_JTAG_TCLK,
44 }, {
45 .name = "mdio",
46 .mask = RT3883_GPIO_MODE_MDIO,
47 .gpio_first = RT3883_GPIO_MDIO_MDC,
48 .gpio_last = RT3883_GPIO_MDIO_MDIO,
49 }, {
50 .name = "ge1",
51 .mask = RT3883_GPIO_MODE_GE1,
52 .gpio_first = RT3883_GPIO_GE1_TXD0,
53 .gpio_last = RT3883_GPIO_GE1_RXCLK,
54 }, {
55 .name = "ge2",
56 .mask = RT3883_GPIO_MODE_GE2,
57 .gpio_first = RT3883_GPIO_GE2_TXD0,
58 .gpio_last = RT3883_GPIO_GE2_RXCLK,
59 }, {
60 .name = "pci",
61 .mask = RT3883_GPIO_MODE_PCI,
62 .gpio_first = RT3883_GPIO_PCI_AD0,
63 .gpio_last = RT3883_GPIO_PCI_AD31,
64 }, {
65 .name = "lna a",
66 .mask = RT3883_GPIO_MODE_LNA_A,
67 .gpio_first = RT3883_GPIO_LNA_PE_A0,
68 .gpio_last = RT3883_GPIO_LNA_PE_A2,
69 }, {
70 .name = "lna g",
71 .mask = RT3883_GPIO_MODE_LNA_G,
72 .gpio_first = RT3883_GPIO_LNA_PE_G0,
73 .gpio_last = RT3883_GPIO_LNA_PE_G2,
74 }, {0}
75};
76
77static struct ralink_pinmux_grp uart_mux[] = {
78 {
79 .name = "uartf",
80 .mask = RT3883_GPIO_MODE_UARTF,
81 .gpio_first = RT3883_GPIO_7,
82 .gpio_last = RT3883_GPIO_14,
83 }, {
84 .name = "pcm uartf",
85 .mask = RT3883_GPIO_MODE_PCM_UARTF,
86 .gpio_first = RT3883_GPIO_7,
87 .gpio_last = RT3883_GPIO_14,
88 }, {
89 .name = "pcm i2s",
90 .mask = RT3883_GPIO_MODE_PCM_I2S,
91 .gpio_first = RT3883_GPIO_7,
92 .gpio_last = RT3883_GPIO_14,
93 }, {
94 .name = "i2s uartf",
95 .mask = RT3883_GPIO_MODE_I2S_UARTF,
96 .gpio_first = RT3883_GPIO_7,
97 .gpio_last = RT3883_GPIO_14,
98 }, {
99 .name = "pcm gpio",
100 .mask = RT3883_GPIO_MODE_PCM_GPIO,
101 .gpio_first = RT3883_GPIO_11,
102 .gpio_last = RT3883_GPIO_14,
103 }, {
104 .name = "gpio uartf",
105 .mask = RT3883_GPIO_MODE_GPIO_UARTF,
106 .gpio_first = RT3883_GPIO_7,
107 .gpio_last = RT3883_GPIO_10,
108 }, {
109 .name = "gpio i2s",
110 .mask = RT3883_GPIO_MODE_GPIO_I2S,
111 .gpio_first = RT3883_GPIO_7,
112 .gpio_last = RT3883_GPIO_10,
113 }, {
114 .name = "gpio",
115 .mask = RT3883_GPIO_MODE_GPIO,
116 }, {0}
117};
118
119static struct ralink_pinmux_grp pci_mux[] = {
120 {
121 .name = "pci-dev",
122 .mask = 0,
123 .gpio_first = RT3883_GPIO_PCI_AD0,
124 .gpio_last = RT3883_GPIO_PCI_AD31,
125 }, {
126 .name = "pci-host2",
127 .mask = 1,
128 .gpio_first = RT3883_GPIO_PCI_AD0,
129 .gpio_last = RT3883_GPIO_PCI_AD31,
130 }, {
131 .name = "pci-host1",
132 .mask = 2,
133 .gpio_first = RT3883_GPIO_PCI_AD0,
134 .gpio_last = RT3883_GPIO_PCI_AD31,
135 }, {
136 .name = "pci-fnc",
137 .mask = 3,
138 .gpio_first = RT3883_GPIO_PCI_AD0,
139 .gpio_last = RT3883_GPIO_PCI_AD31,
140 }, {
141 .name = "pci-gpio",
142 .mask = 7,
143 .gpio_first = RT3883_GPIO_PCI_AD0,
144 .gpio_last = RT3883_GPIO_PCI_AD31,
145 }, {0}
146};
147
148static void rt3883_wdt_reset(void)
149{
150 u32 t;
151
152 /* enable WDT reset output on GPIO 2 */
153 t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
154 t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
155 rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
156}
157
158struct ralink_pinmux rt_gpio_pinmux = {
159 .mode = mode_mux,
160 .uart = uart_mux,
161 .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
162 .uart_mask = RT3883_GPIO_MODE_UART0_MASK,
163 .wdt_reset = rt3883_wdt_reset,
164 .pci = pci_mux,
165 .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
166 .pci_mask = RT3883_GPIO_MODE_PCI_MASK,
167};
168
169void __init ralink_clk_init(void)
170{
171 unsigned long cpu_rate, sys_rate;
172 u32 syscfg0;
173 u32 clksel;
174 u32 ddr2;
175
176 syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
177 clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
178 RT3883_SYSCFG0_CPUCLK_MASK);
179 ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
180
181 switch (clksel) {
182 case RT3883_SYSCFG0_CPUCLK_250:
183 cpu_rate = 250000000;
184 sys_rate = (ddr2) ? 125000000 : 83000000;
185 break;
186 case RT3883_SYSCFG0_CPUCLK_384:
187 cpu_rate = 384000000;
188 sys_rate = (ddr2) ? 128000000 : 96000000;
189 break;
190 case RT3883_SYSCFG0_CPUCLK_480:
191 cpu_rate = 480000000;
192 sys_rate = (ddr2) ? 160000000 : 120000000;
193 break;
194 case RT3883_SYSCFG0_CPUCLK_500:
195 cpu_rate = 500000000;
196 sys_rate = (ddr2) ? 166000000 : 125000000;
197 break;
198 }
199
200 ralink_clk_add("cpu", cpu_rate);
201 ralink_clk_add("10000100.timer", sys_rate);
202 ralink_clk_add("10000120.watchdog", sys_rate);
203 ralink_clk_add("10000500.uart", 40000000);
204 ralink_clk_add("10000b00.spi", sys_rate);
205 ralink_clk_add("10000c00.uartlite", 40000000);
206 ralink_clk_add("10100000.ethernet", sys_rate);
207}
208
209void __init ralink_of_remap(void)
210{
211 rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
212 rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
213
214 if (!rt_sysc_membase || !rt_memc_membase)
215 panic("Failed to remap core resources");
216}
217
218void prom_soc_init(struct ralink_soc_info *soc_info)
219{
220 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
221 const char *name;
222 u32 n0;
223 u32 n1;
224 u32 id;
225
226 n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
227 n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
228 id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
229
230 if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
231 soc_info->compatible = "ralink,rt3883-soc";
232 name = "RT3883";
233 } else {
234 panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
235 }
236
237 snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
238 "Ralink %s ver:%u eco:%u",
239 name,
240 (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
241 (id & RT3883_REVID_ECO_ID_MASK));
242
243 soc_info->mem_base = RT3883_SDRAM_BASE;
244 soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
245 soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
246}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 1d1919a44e88..7a53b1e28a93 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
114 * data structures on the first couple of pages of the first slot of each 114 * data structures on the first couple of pages of the first slot of each
115 * node. If this is the case, getfirstfree(node) > getslotstart(node, 0). 115 * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
116 */ 116 */
117pfn_t node_getfirstfree(cnodeid_t cnode) 117unsigned long node_getfirstfree(cnodeid_t cnode)
118{ 118{
119 unsigned long loadbase = REP_BASE; 119 unsigned long loadbase = REP_BASE;
120 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 120 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 5f2bddb1860e..1230f56429d7 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
255 } 255 }
256} 256}
257 257
258static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot) 258static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
259{ 259{
260 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 260 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
261 261
262 return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT); 262 return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
263} 263}
264 264
265static pfn_t __init slot_psize_compute(cnodeid_t node, int slot) 265static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
266{ 266{
267 nasid_t nasid; 267 nasid_t nasid;
268 lboard_t *brd; 268 lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
353 353
354static void __init szmem(void) 354static void __init szmem(void)
355{ 355{
356 pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ 356 unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
357 int slot; 357 int slot;
358 cnodeid_t node; 358 cnodeid_t node;
359 359
@@ -390,10 +390,10 @@ static void __init szmem(void)
390 390
391static void __init node_mem_init(cnodeid_t node) 391static void __init node_mem_init(cnodeid_t node)
392{ 392{
393 pfn_t slot_firstpfn = slot_getbasepfn(node, 0); 393 unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
394 pfn_t slot_freepfn = node_getfirstfree(node); 394 unsigned long slot_freepfn = node_getfirstfree(node);
395 unsigned long bootmap_size; 395 unsigned long bootmap_size;
396 pfn_t start_pfn, end_pfn; 396 unsigned long start_pfn, end_pfn;
397 397
398 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); 398 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
399 399
@@ -467,7 +467,7 @@ void __init paging_init(void)
467 pagetable_init(); 467 pagetable_init();
468 468
469 for_each_online_node(node) { 469 for_each_online_node(node) {
470 pfn_t start_pfn, end_pfn; 470 unsigned long start_pfn, end_pfn;
471 471
472 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); 472 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
473 473
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index fff58ac176f3..2e21b761cb9c 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode,
69 /* Nothing to do ... */ 69 /* Nothing to do ... */
70} 70}
71 71
72int rt_timer_irq; 72unsigned int rt_timer_irq;
73 73
74static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent); 74static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
75static DEFINE_PER_CPU(char [11], hub_rt_name); 75static DEFINE_PER_CPU(char [11], hub_rt_name);
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index f517e08e7f0d..a134ff4da12e 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -59,11 +59,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
59 current->comm, current->pid, r20); 59 current->comm, current->pid, r20);
60 return -ENOSYS; 60 return -ENOSYS;
61} 61}
62
63asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
64 u32 mask_lo, int fd,
65 const char __user *pathname)
66{
67 return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
68 fd, pathname);
69}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3fe5259e2fea..915fbb4fc2fe 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -150,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
150 CURRENT_THREAD_INFO(r11, r1) 150 CURRENT_THREAD_INFO(r11, r1)
151 ld r10,TI_FLAGS(r11) 151 ld r10,TI_FLAGS(r11)
152 andi. r11,r10,_TIF_SYSCALL_T_OR_A 152 andi. r11,r10,_TIF_SYSCALL_T_OR_A
153 bne- syscall_dotrace 153 bne syscall_dotrace
154.Lsyscall_dotrace_cont: 154.Lsyscall_dotrace_cont:
155 cmpldi 0,r0,NR_syscalls 155 cmpldi 0,r0,NR_syscalls
156 bge- syscall_enosys 156 bge- syscall_enosys
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index cd6e19d263b3..8a285876aef8 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -126,11 +126,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
126 126
127 return sys_sync_file_range(fd, offset, nbytes, flags); 127 return sys_sync_file_range(fd, offset, nbytes, flags);
128} 128}
129
130asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
131 unsigned mask_hi, unsigned mask_lo,
132 int dfd, const char __user *pathname)
133{
134 u64 mask = ((u64)mask_hi << 32) | mask_lo;
135 return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
136}
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 2d72d9e96c15..9cb1b975b353 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -793,10 +793,6 @@ ENTRY(sys32_stime_wrapper)
793 llgtr %r2,%r2 # long * 793 llgtr %r2,%r2 # long *
794 jg compat_sys_stime # branch to system call 794 jg compat_sys_stime # branch to system call
795 795
796ENTRY(sys32_sysctl_wrapper)
797 llgtr %r2,%r2 # struct compat_sysctl_args *
798 jg compat_sys_sysctl
799
800ENTRY(sys32_fstat64_wrapper) 796ENTRY(sys32_fstat64_wrapper)
801 llgfr %r2,%r2 # unsigned long 797 llgfr %r2,%r2 # unsigned long
802 llgtr %r3,%r3 # struct stat64 * 798 llgtr %r3,%r3 # struct stat64 *
@@ -1349,15 +1345,6 @@ ENTRY(sys_fanotify_init_wrapper)
1349 llgfr %r3,%r3 # unsigned int 1345 llgfr %r3,%r3 # unsigned int
1350 jg sys_fanotify_init # branch to system call 1346 jg sys_fanotify_init # branch to system call
1351 1347
1352ENTRY(sys_fanotify_mark_wrapper)
1353 lgfr %r2,%r2 # int
1354 llgfr %r3,%r3 # unsigned int
1355 sllg %r4,%r4,32 # get high word of 64bit mask
1356 lr %r4,%r5 # get low word of 64bit mask
1357 llgfr %r5,%r6 # unsigned int
1358 llgt %r6,164(%r15) # char *
1359 jg sys_fanotify_mark # branch to system call
1360
1361ENTRY(sys_prlimit64_wrapper) 1348ENTRY(sys_prlimit64_wrapper)
1362 lgfr %r2,%r2 # pid_t 1349 lgfr %r2,%r2 # pid_t
1363 llgfr %r3,%r3 # unsigned int 1350 llgfr %r3,%r3 # unsigned int
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9f214e992eed..913410bd74a3 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -157,7 +157,7 @@ SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */
157SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper) 157SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
158SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper) 158SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
159SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper) 159SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
160SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper) 160SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
161SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */ 161SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */
162SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper) 162SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
163SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper) 163SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
@@ -341,7 +341,7 @@ SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
341SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ 341SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
342SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) 342SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
343SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper) 343SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
344SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper) 344SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
345SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper) 345SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
346SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */ 346SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
347SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at) 347SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index 2e680b5245c9..f7c72b6efc27 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -239,15 +239,6 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
239 nop 239 nop
240 nop 240 nop
241 241
242 .globl sys32_fanotify_mark
243sys32_fanotify_mark:
244 sethi %hi(sys_fanotify_mark), %g1
245 sllx %o2, 32, %o2
246 or %o2, %o3, %o2
247 mov %o4, %o3
248 jmpl %g1 + %lo(sys_fanotify_mark), %g0
249 mov %o5, %o4
250
251 .section __ex_table,"a" 242 .section __ex_table,"a"
252 .align 4 243 .align 4
253 .word 1b, __retl_efault, 2b, __retl_efault 244 .word 1b, __retl_efault, 2b, __retl_efault
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 8fd932080215..6d81597064b6 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -84,7 +84,7 @@ sys_call_table32:
84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 85/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
87/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module
90 90
diff --git a/arch/unicore32/kernel/sys.c b/arch/unicore32/kernel/sys.c
index cfe79c9529b3..f9e862539314 100644
--- a/arch/unicore32/kernel/sys.c
+++ b/arch/unicore32/kernel/sys.c
@@ -28,19 +28,11 @@
28#include <asm/syscalls.h> 28#include <asm/syscalls.h>
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30 30
31/* Note: used by the compat code even in 64-bit Linux. */
32SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
33 unsigned long, prot, unsigned long, flags,
34 unsigned long, fd, unsigned long, off_4k)
35{
36 return sys_mmap_pgoff(addr, len, prot, flags, fd,
37 off_4k);
38}
39
40/* Provide the actual syscall number to call mapping. */ 31/* Provide the actual syscall number to call mapping. */
41#undef __SYSCALL 32#undef __SYSCALL
42#define __SYSCALL(nr, call) [nr] = (call), 33#define __SYSCALL(nr, call) [nr] = (call),
43 34
35#define sys_mmap2 sys_mmap_pgoff
44/* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */ 36/* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */
45void *sys_call_table[__NR_syscalls] = { 37void *sys_call_table[__NR_syscalls] = {
46 [0 ... __NR_syscalls-1] = sys_ni_syscall, 38 [0 ... __NR_syscalls-1] = sys_ni_syscall,
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 4e4907c67d92..8e0ceecdc957 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -243,12 +243,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
243 return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, 243 return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
244 ((u64)len_hi << 32) | len_lo); 244 ((u64)len_hi << 32) | len_lo);
245} 245}
246
247asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
248 u32 mask_lo, u32 mask_hi,
249 int fd, const char __user *pathname)
250{
251 return sys_fanotify_mark(fanotify_fd, flags,
252 ((u64)mask_hi << 32) | mask_lo,
253 fd, pathname);
254}
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 0ef202e232d6..82c34ee25a65 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -50,9 +50,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
50asmlinkage long sys32_sigreturn(void); 50asmlinkage long sys32_sigreturn(void);
51asmlinkage long sys32_rt_sigreturn(void); 51asmlinkage long sys32_rt_sigreturn(void);
52 52
53asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
54 const char __user *);
55
56#endif /* CONFIG_COMPAT */ 53#endif /* CONFIG_COMPAT */
57 54
58#endif /* _ASM_X86_SYS_IA32_H */ 55#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 5f87b35fd2ef..2917a6452c49 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -37,8 +37,8 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
37unsigned long sys_sigreturn(void); 37unsigned long sys_sigreturn(void);
38 38
39/* kernel/vm86_32.c */ 39/* kernel/vm86_32.c */
40int sys_vm86old(struct vm86_struct __user *); 40asmlinkage long sys_vm86old(struct vm86_struct __user *);
41int sys_vm86(unsigned long, unsigned long); 41asmlinkage long sys_vm86(unsigned long, unsigned long);
42 42
43#else /* CONFIG_X86_32 */ 43#else /* CONFIG_X86_32 */
44 44
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index b3a4866661c5..2af848dfa754 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -120,6 +120,9 @@
120#define MSR_CORE_C6_RESIDENCY 0x000003fd 120#define MSR_CORE_C6_RESIDENCY 0x000003fd
121#define MSR_CORE_C7_RESIDENCY 0x000003fe 121#define MSR_CORE_C7_RESIDENCY 0x000003fe
122#define MSR_PKG_C2_RESIDENCY 0x0000060d 122#define MSR_PKG_C2_RESIDENCY 0x0000060d
123#define MSR_PKG_C8_RESIDENCY 0x00000630
124#define MSR_PKG_C9_RESIDENCY 0x00000631
125#define MSR_PKG_C10_RESIDENCY 0x00000632
123 126
124/* Run Time Average Power Limiting (RAPL) Interface */ 127/* Run Time Average Power Limiting (RAPL) Interface */
125 128
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 1cf5766dde16..e8edcf52e069 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -33,6 +33,7 @@
33#include <linux/capability.h> 33#include <linux/capability.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/syscalls.h>
36#include <linux/sched.h> 37#include <linux/sched.h>
37#include <linux/kernel.h> 38#include <linux/kernel.h>
38#include <linux/signal.h> 39#include <linux/signal.h>
@@ -48,7 +49,6 @@
48#include <asm/io.h> 49#include <asm/io.h>
49#include <asm/tlbflush.h> 50#include <asm/tlbflush.h>
50#include <asm/irq.h> 51#include <asm/irq.h>
51#include <asm/syscalls.h>
52 52
53/* 53/*
54 * Known problems: 54 * Known problems:
@@ -202,36 +202,32 @@ out:
202static int do_vm86_irq_handling(int subfunction, int irqnumber); 202static int do_vm86_irq_handling(int subfunction, int irqnumber);
203static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); 203static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
204 204
205int sys_vm86old(struct vm86_struct __user *v86) 205SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
206{ 206{
207 struct kernel_vm86_struct info; /* declare this _on top_, 207 struct kernel_vm86_struct info; /* declare this _on top_,
208 * this avoids wasting of stack space. 208 * this avoids wasting of stack space.
209 * This remains on the stack until we 209 * This remains on the stack until we
210 * return to 32 bit user space. 210 * return to 32 bit user space.
211 */ 211 */
212 struct task_struct *tsk; 212 struct task_struct *tsk = current;
213 int tmp, ret = -EPERM; 213 int tmp;
214 214
215 tsk = current;
216 if (tsk->thread.saved_sp0) 215 if (tsk->thread.saved_sp0)
217 goto out; 216 return -EPERM;
218 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, 217 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
219 offsetof(struct kernel_vm86_struct, vm86plus) - 218 offsetof(struct kernel_vm86_struct, vm86plus) -
220 sizeof(info.regs)); 219 sizeof(info.regs));
221 ret = -EFAULT;
222 if (tmp) 220 if (tmp)
223 goto out; 221 return -EFAULT;
224 memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); 222 memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
225 info.regs32 = current_pt_regs(); 223 info.regs32 = current_pt_regs();
226 tsk->thread.vm86_info = v86; 224 tsk->thread.vm86_info = v86;
227 do_sys_vm86(&info, tsk); 225 do_sys_vm86(&info, tsk);
228 ret = 0; /* we never return here */ 226 return 0; /* we never return here */
229out:
230 return ret;
231} 227}
232 228
233 229
234int sys_vm86(unsigned long cmd, unsigned long arg) 230SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
235{ 231{
236 struct kernel_vm86_struct info; /* declare this _on top_, 232 struct kernel_vm86_struct info; /* declare this _on top_,
237 * this avoids wasting of stack space. 233 * this avoids wasting of stack space.
@@ -239,7 +235,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
239 * return to 32 bit user space. 235 * return to 32 bit user space.
240 */ 236 */
241 struct task_struct *tsk; 237 struct task_struct *tsk;
242 int tmp, ret; 238 int tmp;
243 struct vm86plus_struct __user *v86; 239 struct vm86plus_struct __user *v86;
244 240
245 tsk = current; 241 tsk = current;
@@ -248,8 +244,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
248 case VM86_FREE_IRQ: 244 case VM86_FREE_IRQ:
249 case VM86_GET_IRQ_BITS: 245 case VM86_GET_IRQ_BITS:
250 case VM86_GET_AND_RESET_IRQ: 246 case VM86_GET_AND_RESET_IRQ:
251 ret = do_vm86_irq_handling(cmd, (int)arg); 247 return do_vm86_irq_handling(cmd, (int)arg);
252 goto out;
253 case VM86_PLUS_INSTALL_CHECK: 248 case VM86_PLUS_INSTALL_CHECK:
254 /* 249 /*
255 * NOTE: on old vm86 stuff this will return the error 250 * NOTE: on old vm86 stuff this will return the error
@@ -257,28 +252,23 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
257 * interpreted as (invalid) address to vm86_struct. 252 * interpreted as (invalid) address to vm86_struct.
258 * So the installation check works. 253 * So the installation check works.
259 */ 254 */
260 ret = 0; 255 return 0;
261 goto out;
262 } 256 }
263 257
264 /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ 258 /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
265 ret = -EPERM;
266 if (tsk->thread.saved_sp0) 259 if (tsk->thread.saved_sp0)
267 goto out; 260 return -EPERM;
268 v86 = (struct vm86plus_struct __user *)arg; 261 v86 = (struct vm86plus_struct __user *)arg;
269 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, 262 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
270 offsetof(struct kernel_vm86_struct, regs32) - 263 offsetof(struct kernel_vm86_struct, regs32) -
271 sizeof(info.regs)); 264 sizeof(info.regs));
272 ret = -EFAULT;
273 if (tmp) 265 if (tmp)
274 goto out; 266 return -EFAULT;
275 info.regs32 = current_pt_regs(); 267 info.regs32 = current_pt_regs();
276 info.vm86plus.is_vm86pus = 1; 268 info.vm86plus.is_vm86pus = 1;
277 tsk->thread.vm86_info = (struct vm86_struct __user *)v86; 269 tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
278 do_sys_vm86(&info, tsk); 270 do_sys_vm86(&info, tsk);
279 ret = 0; /* we never return here */ 271 return 0; /* we never return here */
280out:
281 return ret;
282} 272}
283 273
284 274
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8e517bba6a7c..8db0010ed150 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -60,6 +60,7 @@
60#define OpGS 25ull /* GS */ 60#define OpGS 25ull /* GS */
61#define OpMem8 26ull /* 8-bit zero extended memory operand */ 61#define OpMem8 26ull /* 8-bit zero extended memory operand */
62#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ 62#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
63 64
64#define OpBits 5 /* Width of operand field */ 65#define OpBits 5 /* Width of operand field */
65#define OpMask ((1ull << OpBits) - 1) 66#define OpMask ((1ull << OpBits) - 1)
@@ -99,6 +100,7 @@
99#define SrcImmUByte (OpImmUByte << SrcShift) 100#define SrcImmUByte (OpImmUByte << SrcShift)
100#define SrcImmU (OpImmU << SrcShift) 101#define SrcImmU (OpImmU << SrcShift)
101#define SrcSI (OpSI << SrcShift) 102#define SrcSI (OpSI << SrcShift)
103#define SrcXLat (OpXLat << SrcShift)
102#define SrcImmFAddr (OpImmFAddr << SrcShift) 104#define SrcImmFAddr (OpImmFAddr << SrcShift)
103#define SrcMemFAddr (OpMemFAddr << SrcShift) 105#define SrcMemFAddr (OpMemFAddr << SrcShift)
104#define SrcAcc (OpAcc << SrcShift) 106#define SrcAcc (OpAcc << SrcShift)
@@ -533,6 +535,9 @@ FOP_SETCC(setle)
533FOP_SETCC(setnle) 535FOP_SETCC(setnle)
534FOP_END; 536FOP_END;
535 537
538FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
539FOP_END;
540
536#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ 541#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
537 do { \ 542 do { \
538 unsigned long _tmp; \ 543 unsigned long _tmp; \
@@ -2996,6 +3001,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
2996 return X86EMUL_CONTINUE; 3001 return X86EMUL_CONTINUE;
2997} 3002}
2998 3003
3004static int em_aam(struct x86_emulate_ctxt *ctxt)
3005{
3006 u8 al, ah;
3007
3008 if (ctxt->src.val == 0)
3009 return emulate_de(ctxt);
3010
3011 al = ctxt->dst.val & 0xff;
3012 ah = al / ctxt->src.val;
3013 al %= ctxt->src.val;
3014
3015 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3016
3017 /* Set PF, ZF, SF */
3018 ctxt->src.type = OP_IMM;
3019 ctxt->src.val = 0;
3020 ctxt->src.bytes = 1;
3021 fastop(ctxt, em_or);
3022
3023 return X86EMUL_CONTINUE;
3024}
3025
2999static int em_aad(struct x86_emulate_ctxt *ctxt) 3026static int em_aad(struct x86_emulate_ctxt *ctxt)
3000{ 3027{
3001 u8 al = ctxt->dst.val & 0xff; 3028 u8 al = ctxt->dst.val & 0xff;
@@ -3936,7 +3963,10 @@ static const struct opcode opcode_table[256] = {
3936 /* 0xD0 - 0xD7 */ 3963 /* 0xD0 - 0xD7 */
3937 G(Src2One | ByteOp, group2), G(Src2One, group2), 3964 G(Src2One | ByteOp, group2), G(Src2One, group2),
3938 G(Src2CL | ByteOp, group2), G(Src2CL, group2), 3965 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
3939 N, I(DstAcc | SrcImmByte | No64, em_aad), N, N, 3966 I(DstAcc | SrcImmUByte | No64, em_aam),
3967 I(DstAcc | SrcImmUByte | No64, em_aad),
3968 F(DstAcc | ByteOp | No64, em_salc),
3969 I(DstAcc | SrcXLat | ByteOp, em_mov),
3940 /* 0xD8 - 0xDF */ 3970 /* 0xD8 - 0xDF */
3941 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, 3971 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
3942 /* 0xE0 - 0xE7 */ 3972 /* 0xE0 - 0xE7 */
@@ -4198,6 +4228,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4198 op->val = 0; 4228 op->val = 0;
4199 op->count = 1; 4229 op->count = 1;
4200 break; 4230 break;
4231 case OpXLat:
4232 op->type = OP_MEM;
4233 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4234 op->addr.mem.ea =
4235 register_address(ctxt,
4236 reg_read(ctxt, VCPU_REGS_RBX) +
4237 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4238 op->addr.mem.seg = seg_override(ctxt);
4239 op->val = 0;
4240 break;
4201 case OpImmFAddr: 4241 case OpImmFAddr:
4202 op->type = OP_IMM; 4242 op->type = OP_IMM;
4203 op->addr.mem.ea = ctxt->_eip; 4243 op->addr.mem.ea = ctxt->_eip;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 25a791ed21c8..260a91939555 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5434,6 +5434,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5434 return 0; 5434 return 0;
5435 } 5435 }
5436 5436
5437 if (vcpu->arch.halt_request) {
5438 vcpu->arch.halt_request = 0;
5439 ret = kvm_emulate_halt(vcpu);
5440 goto out;
5441 }
5442
5437 if (signal_pending(current)) 5443 if (signal_pending(current))
5438 goto out; 5444 goto out;
5439 if (need_resched()) 5445 if (need_resched())
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05a8b1a2300d..094b5d96ab14 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
555} 555}
556EXPORT_SYMBOL_GPL(kvm_lmsw); 556EXPORT_SYMBOL_GPL(kvm_lmsw);
557 557
558static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
559{
560 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
561 !vcpu->guest_xcr0_loaded) {
562 /* kvm_set_xcr() also depends on this */
563 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
564 vcpu->guest_xcr0_loaded = 1;
565 }
566}
567
568static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
569{
570 if (vcpu->guest_xcr0_loaded) {
571 if (vcpu->arch.xcr0 != host_xcr0)
572 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
573 vcpu->guest_xcr0_loaded = 0;
574 }
575}
576
558int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 577int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
559{ 578{
560 u64 xcr0; 579 u64 xcr0;
@@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
571 return 1; 590 return 1;
572 if (xcr0 & ~host_xcr0) 591 if (xcr0 & ~host_xcr0)
573 return 1; 592 return 1;
593 kvm_put_guest_xcr0(vcpu);
574 vcpu->arch.xcr0 = xcr0; 594 vcpu->arch.xcr0 = xcr0;
575 vcpu->guest_xcr0_loaded = 0;
576 return 0; 595 return 0;
577} 596}
578 597
@@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
5614 } 5633 }
5615} 5634}
5616 5635
5617static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
5618{
5619 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
5620 !vcpu->guest_xcr0_loaded) {
5621 /* kvm_set_xcr() also depends on this */
5622 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
5623 vcpu->guest_xcr0_loaded = 1;
5624 }
5625}
5626
5627static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
5628{
5629 if (vcpu->guest_xcr0_loaded) {
5630 if (vcpu->arch.xcr0 != host_xcr0)
5631 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
5632 vcpu->guest_xcr0_loaded = 0;
5633 }
5634}
5635
5636static void process_nmi(struct kvm_vcpu *vcpu) 5636static void process_nmi(struct kvm_vcpu *vcpu)
5637{ 5637{
5638 unsigned limit = 2; 5638 unsigned limit = 2;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 4a9be6ddf054..48e8461057ba 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -295,11 +295,10 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
295 int pos; 295 int pos;
296 u32 table_offset, bir; 296 u32 table_offset, bir;
297 297
298 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 298 pos = dev->msix_cap;
299
300 pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, 299 pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
301 &table_offset); 300 &table_offset);
302 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); 301 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
303 302
304 map_irq.table_base = pci_resource_start(dev, bir); 303 map_irq.table_base = pci_resource_start(dev, bir);
305 map_irq.entry_nr = msidesc->msi_attrib.entry_nr; 304 map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index d0d59bfbccce..aabfb8380a1c 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -345,7 +345,7 @@
345336 i386 perf_event_open sys_perf_event_open 345336 i386 perf_event_open sys_perf_event_open
346337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg 346337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg
347338 i386 fanotify_init sys_fanotify_init 347338 i386 fanotify_init sys_fanotify_init
348339 i386 fanotify_mark sys_fanotify_mark sys32_fanotify_mark 348339 i386 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
349340 i386 prlimit64 sys_prlimit64 349340 i386 prlimit64 sys_prlimit64
350341 i386 name_to_handle_at sys_name_to_handle_at 350341 i386 name_to_handle_at sys_name_to_handle_at
351342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at 351342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 53d4f680c9b5..a492be2635ac 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -85,7 +85,29 @@
85 85
86EXPORT_SYMBOL_GPL(hypercall_page); 86EXPORT_SYMBOL_GPL(hypercall_page);
87 87
88/*
89 * Pointer to the xen_vcpu_info structure or
90 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
91 * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
92 * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
93 * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
94 * acknowledge pending events.
95 * Also more subtly it is used by the patched version of irq enable/disable
96 * e.g. xen_irq_enable_direct and xen_iret in PV mode.
97 *
98 * The desire to be able to do those mask/unmask operations as a single
99 * instruction by using the per-cpu offset held in %gs is the real reason
100 * vcpu info is in a per-cpu pointer and the original reason for this
101 * hypercall.
102 *
103 */
88DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 104DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
105
106/*
107 * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
108 * hypercall. This can be used both in PV and PVHVM mode. The structure
109 * overrides the default per_cpu(xen_vcpu, cpu) value.
110 */
89DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); 111DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
90 112
91enum xen_domain_type xen_domain_type = XEN_NATIVE; 113enum xen_domain_type xen_domain_type = XEN_NATIVE;
@@ -157,6 +179,21 @@ static void xen_vcpu_setup(int cpu)
157 179
158 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 180 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
159 181
182 /*
183 * This path is called twice on PVHVM - first during bootup via
184 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
185 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
186 * As we can only do the VCPUOP_register_vcpu_info once lets
187 * not over-write its result.
188 *
189 * For PV it is called during restore (xen_vcpu_restore) and bootup
190 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
191 * use this function.
192 */
193 if (xen_hvm_domain()) {
194 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
195 return;
196 }
160 if (cpu < MAX_VIRT_CPUS) 197 if (cpu < MAX_VIRT_CPUS)
161 per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 198 per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
162 199
@@ -172,7 +209,12 @@ static void xen_vcpu_setup(int cpu)
172 209
173 /* Check to see if the hypervisor will put the vcpu_info 210 /* Check to see if the hypervisor will put the vcpu_info
174 structure where we want it, which allows direct access via 211 structure where we want it, which allows direct access via
175 a percpu-variable. */ 212 a percpu-variable.
213 N.B. This hypercall can _only_ be called once per CPU. Subsequent
214 calls will error out with -EINVAL. This is due to the fact that
215 hypervisor has no unregister variant and this hypercall does not
216 allow to over-write info.mfn and info.offset.
217 */
176 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); 218 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
177 219
178 if (err) { 220 if (err) {
@@ -387,6 +429,9 @@ static void __init xen_init_cpuid_mask(void)
387 cpuid_leaf1_edx_mask &= 429 cpuid_leaf1_edx_mask &=
388 ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ 430 ~((1 << X86_FEATURE_APIC) | /* disable local APIC */
389 (1 << X86_FEATURE_ACPI)); /* disable ACPI */ 431 (1 << X86_FEATURE_ACPI)); /* disable ACPI */
432
433 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
434
390 ax = 1; 435 ax = 1;
391 cx = 0; 436 cx = 0;
392 xen_cpuid(&ax, &bx, &cx, &dx); 437 xen_cpuid(&ax, &bx, &cx, &dx);
@@ -1603,6 +1648,9 @@ void __ref xen_hvm_init_shared_info(void)
1603 * online but xen_hvm_init_shared_info is run at resume time too and 1648 * online but xen_hvm_init_shared_info is run at resume time too and
1604 * in that case multiple vcpus might be online. */ 1649 * in that case multiple vcpus might be online. */
1605 for_each_online_cpu(cpu) { 1650 for_each_online_cpu(cpu) {
1651 /* Leave it to be NULL. */
1652 if (cpu >= MAX_VIRT_CPUS)
1653 continue;
1606 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1654 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
1607 } 1655 }
1608} 1656}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 8b54603ce816..3002ec1bb71a 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -364,7 +364,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
364 int irq; 364 int irq;
365 const char *name; 365 const char *name;
366 366
367 WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n", 367 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
368 cpu, per_cpu(lock_kicker_irq, cpu)); 368 cpu, per_cpu(lock_kicker_irq, cpu));
369 369
370 /* 370 /*
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3a8f7e6db295..e7e92429d10f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
78{ 78{
79 struct drm_crtc *crtc; 79 struct drm_crtc *crtc;
80 80
81 /* Locking is currently fubar in the panic handler. */
82 if (oops_in_progress)
83 return;
84
81 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 85 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
82 WARN_ON(!mutex_is_locked(&crtc->mutex)); 86 WARN_ON(!mutex_is_locked(&crtc->mutex));
83 87
@@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
246 else 250 else
247 return "unknown"; 251 return "unknown";
248} 252}
253EXPORT_SYMBOL(drm_get_connector_status_name);
249 254
250/** 255/**
251 * drm_mode_object_get - allocate a new modeset identifier 256 * drm_mode_object_get - allocate a new modeset identifier
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index e974f9309b72..ed1334e27c33 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
121 connector->helper_private; 121 connector->helper_private;
122 int count = 0; 122 int count = 0;
123 int mode_flags = 0; 123 int mode_flags = 0;
124 bool verbose_prune = true;
124 125
125 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 126 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
126 drm_get_connector_name(connector)); 127 drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
149 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 150 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
150 connector->base.id, drm_get_connector_name(connector)); 151 connector->base.id, drm_get_connector_name(connector));
151 drm_mode_connector_update_edid_property(connector, NULL); 152 drm_mode_connector_update_edid_property(connector, NULL);
153 verbose_prune = false;
152 goto prune; 154 goto prune;
153 } 155 }
154 156
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
182 } 184 }
183 185
184prune: 186prune:
185 drm_mode_prune_invalid(dev, &connector->modes, true); 187 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
186 188
187 if (list_empty(&connector->modes)) 189 if (list_empty(&connector->modes))
188 return 0; 190 return 0;
@@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
1005 continue; 1007 continue;
1006 1008
1007 connector->status = connector->funcs->detect(connector, false); 1009 connector->status = connector->funcs->detect(connector, false);
1008 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 1010 if (old_status != connector->status) {
1009 connector->base.id, 1011 const char *old, *new;
1010 drm_get_connector_name(connector), 1012
1011 old_status, connector->status); 1013 old = drm_get_connector_status_name(old_status);
1012 if (old_status != connector->status) 1014 new = drm_get_connector_status_name(connector->status);
1015
1016 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
1017 "status updated from %s to %s\n",
1018 connector->base.id,
1019 drm_get_connector_name(connector),
1020 old, new);
1021
1013 changed = true; 1022 changed = true;
1023 }
1014 } 1024 }
1015 1025
1016 mutex_unlock(&dev->mode_config.mutex); 1026 mutex_unlock(&dev->mode_config.mutex);
@@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
1083 old_status = connector->status; 1093 old_status = connector->status;
1084 1094
1085 connector->status = connector->funcs->detect(connector, false); 1095 connector->status = connector->funcs->detect(connector, false);
1086 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 1096 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
1087 connector->base.id, 1097 connector->base.id,
1088 drm_get_connector_name(connector), 1098 drm_get_connector_name(connector),
1089 old_status, connector->status); 1099 drm_get_connector_status_name(old_status),
1100 drm_get_connector_status_name(connector->status));
1090 if (old_status != connector->status) 1101 if (old_status != connector->status)
1091 changed = true; 1102 changed = true;
1092 } 1103 }
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8d4f29075af5..9cc247f55502 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data,
57 struct drm_file *file_priv); 57 struct drm_file *file_priv);
58 58
59#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ 59#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
60 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} 60 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
61 61
62/** Ioctl table */ 62/** Ioctl table */
63static const struct drm_ioctl_desc drm_ioctls[] = { 63static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
375{ 375{
376 struct drm_file *file_priv = filp->private_data; 376 struct drm_file *file_priv = filp->private_data;
377 struct drm_device *dev; 377 struct drm_device *dev;
378 const struct drm_ioctl_desc *ioctl; 378 const struct drm_ioctl_desc *ioctl = NULL;
379 drm_ioctl_t *func; 379 drm_ioctl_t *func;
380 unsigned int nr = DRM_IOCTL_NR(cmd); 380 unsigned int nr = DRM_IOCTL_NR(cmd);
381 int retcode = -EINVAL; 381 int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
392 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); 392 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
393 ++file_priv->ioctl_count; 393 ++file_priv->ioctl_count;
394 394
395 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
396 task_pid_nr(current), cmd, nr,
397 (long)old_encode_dev(file_priv->minor->device),
398 file_priv->authenticated);
399
400 if ((nr >= DRM_CORE_IOCTL_COUNT) && 395 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
401 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) 396 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
402 goto err_i1; 397 goto err_i1;
@@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp,
417 } else 412 } else
418 goto err_i1; 413 goto err_i1;
419 414
415 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
416 task_pid_nr(current),
417 (long)old_encode_dev(file_priv->minor->device),
418 file_priv->authenticated, ioctl->name);
419
420 /* Do not trust userspace, use our own definition */ 420 /* Do not trust userspace, use our own definition */
421 func = ioctl->func; 421 func = ioctl->func;
422 /* is there a local override? */ 422 /* is there a local override? */
@@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp,
471 } 471 }
472 472
473 err_i1: 473 err_i1:
474 if (!ioctl)
475 DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
476 task_pid_nr(current),
477 (long)old_encode_dev(file_priv->minor->device),
478 file_priv->authenticated, cmd, nr);
479
474 if (kdata != stack_kdata) 480 if (kdata != stack_kdata)
475 kfree(kdata); 481 kfree(kdata);
476 atomic_dec(&dev->ioctl_count); 482 atomic_dec(&dev->ioctl_count);
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 48c52f7df4e6..0cfb60f54766 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
54 struct i2c_adapter *adap, 54 struct i2c_adapter *adap,
55 const struct i2c_board_info *info) 55 const struct i2c_board_info *info)
56{ 56{
57 char modalias[sizeof(I2C_MODULE_PREFIX)
58 + I2C_NAME_SIZE];
59 struct module *module = NULL; 57 struct module *module = NULL;
60 struct i2c_client *client; 58 struct i2c_client *client;
61 struct drm_i2c_encoder_driver *encoder_drv; 59 struct drm_i2c_encoder_driver *encoder_drv;
62 int err = 0; 60 int err = 0;
63 61
64 snprintf(modalias, sizeof(modalias), 62 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
65 "%s%s", I2C_MODULE_PREFIX, info->type);
66 request_module(modalias);
67 63
68 client = i2c_new_device(adap, info); 64 client = i2c_new_device(adap, info);
69 if (!client) { 65 if (!client) {
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index db1e2d6f90d7..07cf99cc8862 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
755EXPORT_SYMBOL(drm_mm_debug_table); 755EXPORT_SYMBOL(drm_mm_debug_table);
756 756
757#if defined(CONFIG_DEBUG_FS) 757#if defined(CONFIG_DEBUG_FS)
758int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 758static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
759{ 759{
760 struct drm_mm_node *entry;
761 unsigned long total_used = 0, total_free = 0, total = 0;
762 unsigned long hole_start, hole_end, hole_size; 760 unsigned long hole_start, hole_end, hole_size;
763 761
764 hole_start = drm_mm_hole_node_start(&mm->head_node); 762 if (entry->hole_follows) {
765 hole_end = drm_mm_hole_node_end(&mm->head_node); 763 hole_start = drm_mm_hole_node_start(entry);
766 hole_size = hole_end - hole_start; 764 hole_end = drm_mm_hole_node_end(entry);
767 if (hole_size) 765 hole_size = hole_end - hole_start;
768 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 766 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
769 hole_start, hole_end, hole_size); 767 hole_start, hole_end, hole_size);
770 total_free += hole_size; 768 return hole_size;
769 }
770
771 return 0;
772}
773
774int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
775{
776 struct drm_mm_node *entry;
777 unsigned long total_used = 0, total_free = 0, total = 0;
778
779 total_free += drm_mm_dump_hole(m, &mm->head_node);
771 780
772 drm_mm_for_each_node(entry, mm) { 781 drm_mm_for_each_node(entry, mm) {
773 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", 782 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
774 entry->start, entry->start + entry->size, 783 entry->start, entry->start + entry->size,
775 entry->size); 784 entry->size);
776 total_used += entry->size; 785 total_used += entry->size;
777 if (entry->hole_follows) { 786 total_free += drm_mm_dump_hole(m, entry);
778 hole_start = drm_mm_hole_node_start(entry);
779 hole_end = drm_mm_hole_node_end(entry);
780 hole_size = hole_end - hole_start;
781 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
782 hole_start, hole_end, hole_size);
783 total_free += hole_size;
784 }
785 } 787 }
786 total = total_free + total_used; 788 total = total_free + total_used;
787 789
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index faa79df02648..a371ff865a88 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1143 was_digit = false; 1143 was_digit = false;
1144 } else 1144 } else
1145 goto done; 1145 goto done;
1146 break;
1146 case '0' ... '9': 1147 case '0' ... '9':
1147 was_digit = true; 1148 was_digit = true;
1148 break; 1149 break;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6be940effefd..6165535d15f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1045 if (timeout) { 1045 if (timeout) {
1046 struct timespec sleep_time = timespec_sub(now, before); 1046 struct timespec sleep_time = timespec_sub(now, before);
1047 *timeout = timespec_sub(*timeout, sleep_time); 1047 *timeout = timespec_sub(*timeout, sleep_time);
1048 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1049 set_normalized_timespec(timeout, 0, 0);
1048 } 1050 }
1049 1051
1050 switch (end) { 1052 switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1053 case -ERESTARTSYS: /* Signal */ 1055 case -ERESTARTSYS: /* Signal */
1054 return (int)end; 1056 return (int)end;
1055 case 0: /* Timeout */ 1057 case 0: /* Timeout */
1056 if (timeout)
1057 set_normalized_timespec(timeout, 0, 0);
1058 return -ETIME; 1058 return -ETIME;
1059 default: /* Completed */ 1059 default: /* Completed */
1060 WARN_ON(end < 0); /* We're not aware of other errors */ 1060 WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2377 mutex_unlock(&dev->struct_mutex); 2377 mutex_unlock(&dev->struct_mutex);
2378 2378
2379 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); 2379 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2380 if (timeout) { 2380 if (timeout)
2381 WARN_ON(!timespec_valid(timeout));
2382 args->timeout_ns = timespec_to_ns(timeout); 2381 args->timeout_ns = timespec_to_ns(timeout);
2383 }
2384 return ret; 2382 return ret;
2385 2383
2386out: 2384out:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dca614de71b6..bdb0d7717bc7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
709 return snb_gmch_ctl << 25; /* 32 MB units */ 709 return snb_gmch_ctl << 25; /* 32 MB units */
710} 710}
711 711
712static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
713{
714 static const int stolen_decoder[] = {
715 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
716 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
717 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
718 return stolen_decoder[snb_gmch_ctl] << 20;
719}
720
721static int gen6_gmch_probe(struct drm_device *dev, 712static int gen6_gmch_probe(struct drm_device *dev,
722 size_t *gtt_total, 713 size_t *gtt_total,
723 size_t *stolen, 714 size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
747 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 738 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
748 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 739 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
749 740
750 if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) 741 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
751 *stolen = gen7_get_stolen_size(snb_gmch_ctl);
752 else
753 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
754
755 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; 742 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
756 743
757 /* For Modern GENs the PTEs and register space are split in the BAR */ 744 /* For Modern GENs the PTEs and register space are split in the BAR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 83f9c26e1adb..2d6b62e42daf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -46,8 +46,6 @@
46#define SNB_GMCH_GGMS_MASK 0x3 46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f 48#define SNB_GMCH_GMS_MASK 0x1f
49#define IVB_GMCH_GMS_SHIFT 4
50#define IVB_GMCH_GMS_MASK 0xf
51 49
52 50
53/* PCI config space */ 51/* PCI config space */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 26a0a570f92e..fb961bb81903 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1265 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1265 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1266 intel_dp_start_link_train(intel_dp); 1266 intel_dp_start_link_train(intel_dp);
1267 intel_dp_complete_link_train(intel_dp); 1267 intel_dp_complete_link_train(intel_dp);
1268 if (port != PORT_A)
1269 intel_dp_stop_link_train(intel_dp);
1268 } 1270 }
1269} 1271}
1270 1272
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1326 } else if (type == INTEL_OUTPUT_EDP) { 1328 } else if (type == INTEL_OUTPUT_EDP) {
1327 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1329 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1328 1330
1331 if (port == PORT_A)
1332 intel_dp_stop_link_train(intel_dp);
1333
1329 ironlake_edp_backlight_on(intel_dp); 1334 ironlake_edp_backlight_on(intel_dp);
1330 } 1335 }
1331 1336
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb2fbc1e08b9..3d704b706a8d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
702 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 702 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
703 * bpc in between. */ 703 * bpc in between. */
704 bpp = min_t(int, 8*3, pipe_config->pipe_bpp); 704 bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
705 if (is_edp(intel_dp) && dev_priv->edp.bpp)
706 bpp = min_t(int, bpp, dev_priv->edp.bpp);
707
705 for (; bpp >= 6*3; bpp -= 2*3) { 708 for (; bpp >= 6*3; bpp -= 2*3) {
706 mode_rate = intel_dp_link_required(target_clock, bpp); 709 mode_rate = intel_dp_link_required(target_clock, bpp);
707 710
@@ -739,6 +742,7 @@ found:
739 intel_dp->link_bw = bws[clock]; 742 intel_dp->link_bw = bws[clock];
740 intel_dp->lane_count = lane_count; 743 intel_dp->lane_count = lane_count;
741 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 744 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
745 pipe_config->pipe_bpp = bpp;
742 pipe_config->pixel_target_clock = target_clock; 746 pipe_config->pixel_target_clock = target_clock;
743 747
744 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 748 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ found:
751 target_clock, adjusted_mode->clock, 755 target_clock, adjusted_mode->clock,
752 &pipe_config->dp_m_n); 756 &pipe_config->dp_m_n);
753 757
754 /*
755 * XXX: We have a strange regression where using the vbt edp bpp value
756 * for the link bw computation results in black screens, the panel only
757 * works when we do the computation at the usual 24bpp (but still
758 * requires us to use 18bpp). Until that's fully debugged, stay
759 * bug-for-bug compatible with the old code.
760 */
761 if (is_edp(intel_dp) && dev_priv->edp.bpp) {
762 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
763 bpp, dev_priv->edp.bpp);
764 bpp = min_t(int, bpp, dev_priv->edp.bpp);
765 }
766 pipe_config->pipe_bpp = bpp;
767
768 return true; 758 return true;
769} 759}
770 760
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1389 ironlake_edp_panel_on(intel_dp); 1379 ironlake_edp_panel_on(intel_dp);
1390 ironlake_edp_panel_vdd_off(intel_dp, true); 1380 ironlake_edp_panel_vdd_off(intel_dp, true);
1391 intel_dp_complete_link_train(intel_dp); 1381 intel_dp_complete_link_train(intel_dp);
1382 intel_dp_stop_link_train(intel_dp);
1392 ironlake_edp_backlight_on(intel_dp); 1383 ironlake_edp_backlight_on(intel_dp);
1393} 1384}
1394 1385
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1711 struct drm_i915_private *dev_priv = dev->dev_private; 1702 struct drm_i915_private *dev_priv = dev->dev_private;
1712 enum port port = intel_dig_port->port; 1703 enum port port = intel_dig_port->port;
1713 int ret; 1704 int ret;
1714 uint32_t temp;
1715 1705
1716 if (HAS_DDI(dev)) { 1706 if (HAS_DDI(dev)) {
1717 temp = I915_READ(DP_TP_CTL(port)); 1707 uint32_t temp = I915_READ(DP_TP_CTL(port));
1718 1708
1719 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 1709 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1720 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 1710 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1724 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1714 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1725 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1715 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1726 case DP_TRAINING_PATTERN_DISABLE: 1716 case DP_TRAINING_PATTERN_DISABLE:
1727
1728 if (port != PORT_A) {
1729 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1730 I915_WRITE(DP_TP_CTL(port), temp);
1731
1732 if (wait_for((I915_READ(DP_TP_STATUS(port)) &
1733 DP_TP_STATUS_IDLE_DONE), 1))
1734 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1735
1736 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1737 }
1738
1739 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1717 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1740 1718
1741 break; 1719 break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1811 return true; 1789 return true;
1812} 1790}
1813 1791
1792static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
1793{
1794 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1795 struct drm_device *dev = intel_dig_port->base.base.dev;
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797 enum port port = intel_dig_port->port;
1798 uint32_t val;
1799
1800 if (!HAS_DDI(dev))
1801 return;
1802
1803 val = I915_READ(DP_TP_CTL(port));
1804 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1805 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
1806 I915_WRITE(DP_TP_CTL(port), val);
1807
1808 /*
1809 * On PORT_A we can have only eDP in SST mode. There the only reason
1810 * we need to set idle transmission mode is to work around a HW issue
1811 * where we enable the pipe while not in idle link-training mode.
1812 * In this case there is requirement to wait for a minimum number of
1813 * idle patterns to be sent.
1814 */
1815 if (port == PORT_A)
1816 return;
1817
1818 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
1819 1))
1820 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1821}
1822
1814/* Enable corresponding port and start training pattern 1 */ 1823/* Enable corresponding port and start training pattern 1 */
1815void 1824void
1816intel_dp_start_link_train(struct intel_dp *intel_dp) 1825intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1953 ++tries; 1962 ++tries;
1954 } 1963 }
1955 1964
1965 intel_dp_set_idle_link_train(intel_dp);
1966
1967 intel_dp->DP = DP;
1968
1956 if (channel_eq) 1969 if (channel_eq)
1957 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); 1970 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
1958 1971
1959 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1972}
1973
1974void intel_dp_stop_link_train(struct intel_dp *intel_dp)
1975{
1976 intel_dp_set_link_train(intel_dp, intel_dp->DP,
1977 DP_TRAINING_PATTERN_DISABLE);
1960} 1978}
1961 1979
1962static void 1980static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2164 drm_get_encoder_name(&intel_encoder->base)); 2182 drm_get_encoder_name(&intel_encoder->base));
2165 intel_dp_start_link_train(intel_dp); 2183 intel_dp_start_link_train(intel_dp);
2166 intel_dp_complete_link_train(intel_dp); 2184 intel_dp_complete_link_train(intel_dp);
2185 intel_dp_stop_link_train(intel_dp);
2167 } 2186 }
2168} 2187}
2169 2188
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b5b6d19e6dd3..624a9e6b8d71 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
499extern void intel_dp_init_link_config(struct intel_dp *intel_dp); 499extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
500extern void intel_dp_start_link_train(struct intel_dp *intel_dp); 500extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
501extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); 501extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
502extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
502extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 503extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
503extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); 504extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
504extern void intel_dp_check_link_status(struct intel_dp *intel_dp); 505extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 0e19e575a1b4..6b7c3ca2c035 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
262void intel_fbdev_set_suspend(struct drm_device *dev, int state) 262void intel_fbdev_set_suspend(struct drm_device *dev, int state)
263{ 263{
264 drm_i915_private_t *dev_priv = dev->dev_private; 264 drm_i915_private_t *dev_priv = dev->dev_private;
265 if (!dev_priv->fbdev) 265 struct intel_fbdev *ifbdev = dev_priv->fbdev;
266 struct fb_info *info;
267
268 if (!ifbdev)
266 return; 269 return;
267 270
268 fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); 271 info = ifbdev->helper.fbdev;
272
273 /* On resume from hibernation: If the object is shmemfs backed, it has
274 * been restored from swap. If the object is stolen however, it will be
275 * full of whatever garbage was left in there.
276 */
277 if (!state && ifbdev->ifb.obj->stolen)
278 memset_io(info->screen_base, 0, info->screen_size);
279
280 fb_set_suspend(info, state);
269} 281}
270 282
271MODULE_LICENSE("GPL and additional rights"); 283MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index de3b0dc5658b..aa01128ff192 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
1301 1301
1302 vlv_update_drain_latency(dev); 1302 vlv_update_drain_latency(dev);
1303 1303
1304 if (g4x_compute_wm0(dev, 0, 1304 if (g4x_compute_wm0(dev, PIPE_A,
1305 &valleyview_wm_info, latency_ns, 1305 &valleyview_wm_info, latency_ns,
1306 &valleyview_cursor_wm_info, latency_ns, 1306 &valleyview_cursor_wm_info, latency_ns,
1307 &planea_wm, &cursora_wm)) 1307 &planea_wm, &cursora_wm))
1308 enabled |= 1; 1308 enabled |= 1 << PIPE_A;
1309 1309
1310 if (g4x_compute_wm0(dev, 1, 1310 if (g4x_compute_wm0(dev, PIPE_B,
1311 &valleyview_wm_info, latency_ns, 1311 &valleyview_wm_info, latency_ns,
1312 &valleyview_cursor_wm_info, latency_ns, 1312 &valleyview_cursor_wm_info, latency_ns,
1313 &planeb_wm, &cursorb_wm)) 1313 &planeb_wm, &cursorb_wm))
1314 enabled |= 2; 1314 enabled |= 1 << PIPE_B;
1315 1315
1316 if (single_plane_enabled(enabled) && 1316 if (single_plane_enabled(enabled) &&
1317 g4x_compute_srwm(dev, ffs(enabled) - 1, 1317 g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
1357 int plane_sr, cursor_sr; 1357 int plane_sr, cursor_sr;
1358 unsigned int enabled = 0; 1358 unsigned int enabled = 0;
1359 1359
1360 if (g4x_compute_wm0(dev, 0, 1360 if (g4x_compute_wm0(dev, PIPE_A,
1361 &g4x_wm_info, latency_ns, 1361 &g4x_wm_info, latency_ns,
1362 &g4x_cursor_wm_info, latency_ns, 1362 &g4x_cursor_wm_info, latency_ns,
1363 &planea_wm, &cursora_wm)) 1363 &planea_wm, &cursora_wm))
1364 enabled |= 1; 1364 enabled |= 1 << PIPE_A;
1365 1365
1366 if (g4x_compute_wm0(dev, 1, 1366 if (g4x_compute_wm0(dev, PIPE_B,
1367 &g4x_wm_info, latency_ns, 1367 &g4x_wm_info, latency_ns,
1368 &g4x_cursor_wm_info, latency_ns, 1368 &g4x_cursor_wm_info, latency_ns,
1369 &planeb_wm, &cursorb_wm)) 1369 &planeb_wm, &cursorb_wm))
1370 enabled |= 2; 1370 enabled |= 1 << PIPE_B;
1371 1371
1372 if (single_plane_enabled(enabled) && 1372 if (single_plane_enabled(enabled) &&
1373 g4x_compute_srwm(dev, ffs(enabled) - 1, 1373 g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1716 unsigned int enabled; 1716 unsigned int enabled;
1717 1717
1718 enabled = 0; 1718 enabled = 0;
1719 if (g4x_compute_wm0(dev, 0, 1719 if (g4x_compute_wm0(dev, PIPE_A,
1720 &ironlake_display_wm_info, 1720 &ironlake_display_wm_info,
1721 ILK_LP0_PLANE_LATENCY, 1721 ILK_LP0_PLANE_LATENCY,
1722 &ironlake_cursor_wm_info, 1722 &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
1727 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1727 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1728 " plane %d, " "cursor: %d\n", 1728 " plane %d, " "cursor: %d\n",
1729 plane_wm, cursor_wm); 1729 plane_wm, cursor_wm);
1730 enabled |= 1; 1730 enabled |= 1 << PIPE_A;
1731 } 1731 }
1732 1732
1733 if (g4x_compute_wm0(dev, 1, 1733 if (g4x_compute_wm0(dev, PIPE_B,
1734 &ironlake_display_wm_info, 1734 &ironlake_display_wm_info,
1735 ILK_LP0_PLANE_LATENCY, 1735 ILK_LP0_PLANE_LATENCY,
1736 &ironlake_cursor_wm_info, 1736 &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1741 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1741 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1742 " plane %d, cursor: %d\n", 1742 " plane %d, cursor: %d\n",
1743 plane_wm, cursor_wm); 1743 plane_wm, cursor_wm);
1744 enabled |= 2; 1744 enabled |= 1 << PIPE_B;
1745 } 1745 }
1746 1746
1747 /* 1747 /*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1801 unsigned int enabled; 1801 unsigned int enabled;
1802 1802
1803 enabled = 0; 1803 enabled = 0;
1804 if (g4x_compute_wm0(dev, 0, 1804 if (g4x_compute_wm0(dev, PIPE_A,
1805 &sandybridge_display_wm_info, latency, 1805 &sandybridge_display_wm_info, latency,
1806 &sandybridge_cursor_wm_info, latency, 1806 &sandybridge_cursor_wm_info, latency,
1807 &plane_wm, &cursor_wm)) { 1807 &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
1812 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1812 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1813 " plane %d, " "cursor: %d\n", 1813 " plane %d, " "cursor: %d\n",
1814 plane_wm, cursor_wm); 1814 plane_wm, cursor_wm);
1815 enabled |= 1; 1815 enabled |= 1 << PIPE_A;
1816 } 1816 }
1817 1817
1818 if (g4x_compute_wm0(dev, 1, 1818 if (g4x_compute_wm0(dev, PIPE_B,
1819 &sandybridge_display_wm_info, latency, 1819 &sandybridge_display_wm_info, latency,
1820 &sandybridge_cursor_wm_info, latency, 1820 &sandybridge_cursor_wm_info, latency,
1821 &plane_wm, &cursor_wm)) { 1821 &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1826 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1826 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1827 " plane %d, cursor: %d\n", 1827 " plane %d, cursor: %d\n",
1828 plane_wm, cursor_wm); 1828 plane_wm, cursor_wm);
1829 enabled |= 2; 1829 enabled |= 1 << PIPE_B;
1830 } 1830 }
1831 1831
1832 /* 1832 /*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
1904 unsigned int enabled; 1904 unsigned int enabled;
1905 1905
1906 enabled = 0; 1906 enabled = 0;
1907 if (g4x_compute_wm0(dev, 0, 1907 if (g4x_compute_wm0(dev, PIPE_A,
1908 &sandybridge_display_wm_info, latency, 1908 &sandybridge_display_wm_info, latency,
1909 &sandybridge_cursor_wm_info, latency, 1909 &sandybridge_cursor_wm_info, latency,
1910 &plane_wm, &cursor_wm)) { 1910 &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
1915 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1915 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1916 " plane %d, " "cursor: %d\n", 1916 " plane %d, " "cursor: %d\n",
1917 plane_wm, cursor_wm); 1917 plane_wm, cursor_wm);
1918 enabled |= 1; 1918 enabled |= 1 << PIPE_A;
1919 } 1919 }
1920 1920
1921 if (g4x_compute_wm0(dev, 1, 1921 if (g4x_compute_wm0(dev, PIPE_B,
1922 &sandybridge_display_wm_info, latency, 1922 &sandybridge_display_wm_info, latency,
1923 &sandybridge_cursor_wm_info, latency, 1923 &sandybridge_cursor_wm_info, latency,
1924 &plane_wm, &cursor_wm)) { 1924 &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
1929 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1929 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1930 " plane %d, cursor: %d\n", 1930 " plane %d, cursor: %d\n",
1931 plane_wm, cursor_wm); 1931 plane_wm, cursor_wm);
1932 enabled |= 2; 1932 enabled |= 1 << PIPE_B;
1933 } 1933 }
1934 1934
1935 if (g4x_compute_wm0(dev, 2, 1935 if (g4x_compute_wm0(dev, PIPE_C,
1936 &sandybridge_display_wm_info, latency, 1936 &sandybridge_display_wm_info, latency,
1937 &sandybridge_cursor_wm_info, latency, 1937 &sandybridge_cursor_wm_info, latency,
1938 &plane_wm, &cursor_wm)) { 1938 &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
1943 DRM_DEBUG_KMS("FIFO watermarks For pipe C -" 1943 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1944 " plane %d, cursor: %d\n", 1944 " plane %d, cursor: %d\n",
1945 plane_wm, cursor_wm); 1945 plane_wm, cursor_wm);
1946 enabled |= 3; 1946 enabled |= 1 << PIPE_C;
1947 } 1947 }
1948 1948
1949 /* 1949 /*
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f9889658329b..77b8a45fb10a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
46 46
47static inline void mga_wait_vsync(struct mga_device *mdev) 47static inline void mga_wait_vsync(struct mga_device *mdev)
48{ 48{
49 unsigned int count = 0; 49 unsigned long timeout = jiffies + HZ/10;
50 unsigned int status = 0; 50 unsigned int status = 0;
51 51
52 do { 52 do {
53 status = RREG32(MGAREG_Status); 53 status = RREG32(MGAREG_Status);
54 count++; 54 } while ((status & 0x08) && time_before(jiffies, timeout));
55 } while ((status & 0x08) && (count < 250000)); 55 timeout = jiffies + HZ/10;
56 count = 0;
57 status = 0; 56 status = 0;
58 do { 57 do {
59 status = RREG32(MGAREG_Status); 58 status = RREG32(MGAREG_Status);
60 count++; 59 } while (!(status & 0x08) && time_before(jiffies, timeout));
61 } while (!(status & 0x08) && (count < 250000));
62} 60}
63 61
64static inline void mga_wait_busy(struct mga_device *mdev) 62static inline void mga_wait_busy(struct mga_device *mdev)
65{ 63{
66 unsigned int count = 0; 64 unsigned long timeout = jiffies + HZ;
67 unsigned int status = 0; 65 unsigned int status = 0;
68 do { 66 do {
69 status = RREG8(MGAREG_Status + 2); 67 status = RREG8(MGAREG_Status + 2);
70 count++; 68 } while ((status & 0x01) && time_before(jiffies, timeout));
71 } while ((status & 0x01) && (count < 500000));
72} 69}
73 70
74/* 71/*
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
189 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 186 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
190 tmp = RREG8(DAC_DATA); 187 tmp = RREG8(DAC_DATA);
191 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 188 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
192 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 189 WREG8(DAC_DATA, tmp);
193 190
194 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 191 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
195 tmp = RREG8(DAC_DATA); 192 tmp = RREG8(DAC_DATA);
196 tmp |= MGA1064_REMHEADCTL_CLKDIS; 193 tmp |= MGA1064_REMHEADCTL_CLKDIS;
197 WREG_DAC(MGA1064_REMHEADCTL, tmp); 194 WREG8(DAC_DATA, tmp);
198 195
199 /* select PLL Set C */ 196 /* select PLL Set C */
200 tmp = RREG8(MGAREG_MEM_MISC_READ); 197 tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
204 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 201 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
205 tmp = RREG8(DAC_DATA); 202 tmp = RREG8(DAC_DATA);
206 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; 203 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
207 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 204 WREG8(DAC_DATA, tmp);
208 205
209 udelay(500); 206 udelay(500);
210 207
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
212 WREG8(DAC_INDEX, MGA1064_VREF_CTL); 209 WREG8(DAC_INDEX, MGA1064_VREF_CTL);
213 tmp = RREG8(DAC_DATA); 210 tmp = RREG8(DAC_DATA);
214 tmp &= ~0x04; 211 tmp &= ~0x04;
215 WREG_DAC(MGA1064_VREF_CTL, tmp); 212 WREG8(DAC_DATA, tmp);
216 213
217 udelay(50); 214 udelay(50);
218 215
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
236 tmp = RREG8(DAC_DATA); 233 tmp = RREG8(DAC_DATA);
237 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 234 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
238 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 235 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
239 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 236 WREG8(DAC_DATA, tmp);
240 237
241 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 238 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
242 tmp = RREG8(DAC_DATA); 239 tmp = RREG8(DAC_DATA);
243 tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; 240 tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
244 tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; 241 tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
245 WREG_DAC(MGA1064_REMHEADCTL, tmp); 242 WREG8(DAC_DATA, tmp);
246 243
247 /* reset dotclock rate bit */ 244 /* reset dotclock rate bit */
248 WREG8(MGAREG_SEQ_INDEX, 1); 245 WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
253 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 250 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
254 tmp = RREG8(DAC_DATA); 251 tmp = RREG8(DAC_DATA);
255 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 252 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
256 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 253 WREG8(DAC_DATA, tmp);
257 254
258 vcount = RREG8(MGAREG_VCOUNT); 255 vcount = RREG8(MGAREG_VCOUNT);
259 256
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
318 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 315 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
319 tmp = RREG8(DAC_DATA); 316 tmp = RREG8(DAC_DATA);
320 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 317 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
321 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 318 WREG8(DAC_DATA, tmp);
322 319
323 tmp = RREG8(MGAREG_MEM_MISC_READ); 320 tmp = RREG8(MGAREG_MEM_MISC_READ);
324 tmp |= 0x3 << 2; 321 tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
326 323
327 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 324 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
328 tmp = RREG8(DAC_DATA); 325 tmp = RREG8(DAC_DATA);
329 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40); 326 WREG8(DAC_DATA, tmp & ~0x40);
330 327
331 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 328 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
332 tmp = RREG8(DAC_DATA); 329 tmp = RREG8(DAC_DATA);
333 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 330 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
334 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 331 WREG8(DAC_DATA, tmp);
335 332
336 WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); 333 WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
337 WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); 334 WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
342 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 339 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
343 tmp = RREG8(DAC_DATA); 340 tmp = RREG8(DAC_DATA);
344 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 341 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
345 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 342 WREG8(DAC_DATA, tmp);
346 343
347 udelay(500); 344 udelay(500);
348 345
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
350 tmp = RREG8(DAC_DATA); 347 tmp = RREG8(DAC_DATA);
351 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 348 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
352 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 349 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
353 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 350 WREG8(DAC_DATA, tmp);
354 351
355 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 352 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
356 tmp = RREG8(DAC_DATA); 353 tmp = RREG8(DAC_DATA);
357 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40); 354 WREG8(DAC_DATA, tmp | 0x40);
358 355
359 tmp = RREG8(MGAREG_MEM_MISC_READ); 356 tmp = RREG8(MGAREG_MEM_MISC_READ);
360 tmp |= (0x3 << 2); 357 tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
363 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 360 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
364 tmp = RREG8(DAC_DATA); 361 tmp = RREG8(DAC_DATA);
365 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 362 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
366 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 363 WREG8(DAC_DATA, tmp);
367 364
368 return 0; 365 return 0;
369} 366}
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
416 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 413 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
417 tmp = RREG8(DAC_DATA); 414 tmp = RREG8(DAC_DATA);
418 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 415 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
419 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 416 WREG8(DAC_DATA, tmp);
420 417
421 tmp = RREG8(MGAREG_MEM_MISC_READ); 418 tmp = RREG8(MGAREG_MEM_MISC_READ);
422 tmp |= 0x3 << 2; 419 tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
425 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 422 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
426 tmp = RREG8(DAC_DATA); 423 tmp = RREG8(DAC_DATA);
427 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 424 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
428 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 425 WREG8(DAC_DATA, tmp);
429 426
430 udelay(500); 427 udelay(500);
431 428
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
439 tmp = RREG8(DAC_DATA); 436 tmp = RREG8(DAC_DATA);
440 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 437 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
441 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 438 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
442 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 439 WREG8(DAC_DATA, tmp);
443 440
444 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 441 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
445 tmp = RREG8(DAC_DATA); 442 tmp = RREG8(DAC_DATA);
446 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 443 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
447 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 444 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
448 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 445 WREG8(DAC_DATA, tmp);
449 446
450 vcount = RREG8(MGAREG_VCOUNT); 447 vcount = RREG8(MGAREG_VCOUNT);
451 448
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
515 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 512 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
516 tmp = RREG8(DAC_DATA); 513 tmp = RREG8(DAC_DATA);
517 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 514 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
518 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 515 WREG8(DAC_DATA, tmp);
519 516
520 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 517 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
521 tmp = RREG8(DAC_DATA); 518 tmp = RREG8(DAC_DATA);
522 tmp |= MGA1064_REMHEADCTL_CLKDIS; 519 tmp |= MGA1064_REMHEADCTL_CLKDIS;
523 WREG_DAC(MGA1064_REMHEADCTL, tmp); 520 WREG8(DAC_DATA, tmp);
524 521
525 tmp = RREG8(MGAREG_MEM_MISC_READ); 522 tmp = RREG8(MGAREG_MEM_MISC_READ);
526 tmp |= (0x3<<2) | 0xc0; 523 tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
530 tmp = RREG8(DAC_DATA); 527 tmp = RREG8(DAC_DATA);
531 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 528 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
532 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 529 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
533 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 530 WREG8(DAC_DATA, tmp);
534 531
535 udelay(500); 532 udelay(500);
536 533
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
657 WREG_DAC(MGA1064_GEN_IO_DATA, tmp); 654 WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
658} 655}
659 656
660 657/*
658 This is how the framebuffer base address is stored in g200 cards:
659 * Assume @offset is the gpu_addr variable of the framebuffer object
660 * Then addr is the number of _pixels_ (not bytes) from the start of
661 VRAM to the first pixel we want to display. (divided by 2 for 32bit
662 framebuffers)
663 * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
664 addr<20> -> CRTCEXT0<6>
665 addr<19-16> -> CRTCEXT0<3-0>
666 addr<15-8> -> CRTCC<7-0>
667 addr<7-0> -> CRTCD<7-0>
668 CRTCEXT0 has to be programmed last to trigger an update and make the
669 new addr variable take effect.
670 */
661void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) 671void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
662{ 672{
663 struct mga_device *mdev = crtc->dev->dev_private; 673 struct mga_device *mdev = crtc->dev->dev_private;
664 u32 addr; 674 u32 addr;
665 int count; 675 int count;
676 u8 crtcext0;
666 677
667 while (RREG8(0x1fda) & 0x08); 678 while (RREG8(0x1fda) & 0x08);
668 while (!(RREG8(0x1fda) & 0x08)); 679 while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
670 count = RREG8(MGAREG_VCOUNT) + 2; 681 count = RREG8(MGAREG_VCOUNT) + 2;
671 while (RREG8(MGAREG_VCOUNT) < count); 682 while (RREG8(MGAREG_VCOUNT) < count);
672 683
673 addr = offset >> 2; 684 WREG8(MGAREG_CRTCEXT_INDEX, 0);
685 crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
686 crtcext0 &= 0xB0;
687 addr = offset / 8;
688 /* Can't store addresses any higher than that...
689 but we also don't have more than 16MB of memory, so it should be fine. */
690 WARN_ON(addr > 0x1fffff);
691 crtcext0 |= (!!(addr & (1<<20)))<<6;
674 WREG_CRT(0x0d, (u8)(addr & 0xff)); 692 WREG_CRT(0x0d, (u8)(addr & 0xff));
675 WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); 693 WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
676 WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf); 694 WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
677} 695}
678 696
679 697
@@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
829 847
830 848
831 for (i = 0; i < sizeof(dacvalue); i++) { 849 for (i = 0; i < sizeof(dacvalue); i++) {
832 if ((i <= 0x03) || 850 if ((i <= 0x17) ||
833 (i == 0x07) ||
834 (i == 0x0b) ||
835 (i == 0x0f) ||
836 ((i >= 0x13) && (i <= 0x17)) ||
837 (i == 0x1b) || 851 (i == 0x1b) ||
838 (i == 0x1c) || 852 (i == 0x1c) ||
839 ((i >= 0x1f) && (i <= 0x29)) || 853 ((i >= 0x1f) && (i <= 0x29)) ||
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6961bbeab3ed..264f55099940 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1685 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, 1685 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
1686 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 1686 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
1687 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, 1687 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
1688 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
1688 { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, 1689 { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
1689 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, 1690 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
1690 { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, 1691 { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void)
2341 2342
2342 init_waitqueue_head(&hdev->debug_wait); 2343 init_waitqueue_head(&hdev->debug_wait);
2343 INIT_LIST_HEAD(&hdev->debug_list); 2344 INIT_LIST_HEAD(&hdev->debug_list);
2344 mutex_init(&hdev->debug_list_lock); 2345 spin_lock_init(&hdev->debug_list_lock);
2345 sema_init(&hdev->driver_lock, 1); 2346 sema_init(&hdev->driver_lock, 1);
2346 sema_init(&hdev->driver_input_lock, 1); 2347 sema_init(&hdev->driver_input_lock, 1);
2347 2348
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 7e56cb3855e3..8453214ec376 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf)
579{ 579{
580 int i; 580 int i;
581 struct hid_debug_list *list; 581 struct hid_debug_list *list;
582 unsigned long flags;
582 583
583 mutex_lock(&hdev->debug_list_lock); 584 spin_lock_irqsave(&hdev->debug_list_lock, flags);
584 list_for_each_entry(list, &hdev->debug_list, node) { 585 list_for_each_entry(list, &hdev->debug_list, node) {
585 for (i = 0; i < strlen(buf); i++) 586 for (i = 0; i < strlen(buf); i++)
586 list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = 587 list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
587 buf[i]; 588 buf[i];
588 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; 589 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
589 } 590 }
590 mutex_unlock(&hdev->debug_list_lock); 591 spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
591 592
592 wake_up_interruptible(&hdev->debug_wait); 593 wake_up_interruptible(&hdev->debug_wait);
593} 594}
@@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
977{ 978{
978 int err = 0; 979 int err = 0;
979 struct hid_debug_list *list; 980 struct hid_debug_list *list;
981 unsigned long flags;
980 982
981 if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) { 983 if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) {
982 err = -ENOMEM; 984 err = -ENOMEM;
@@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
992 file->private_data = list; 994 file->private_data = list;
993 mutex_init(&list->read_mutex); 995 mutex_init(&list->read_mutex);
994 996
995 mutex_lock(&list->hdev->debug_list_lock); 997 spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
996 list_add_tail(&list->node, &list->hdev->debug_list); 998 list_add_tail(&list->node, &list->hdev->debug_list);
997 mutex_unlock(&list->hdev->debug_list_lock); 999 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
998 1000
999out: 1001out:
1000 return err; 1002 return err;
@@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
1088static int hid_debug_events_release(struct inode *inode, struct file *file) 1090static int hid_debug_events_release(struct inode *inode, struct file *file)
1089{ 1091{
1090 struct hid_debug_list *list = file->private_data; 1092 struct hid_debug_list *list = file->private_data;
1093 unsigned long flags;
1091 1094
1092 mutex_lock(&list->hdev->debug_list_lock); 1095 spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
1093 list_del(&list->node); 1096 list_del(&list->node);
1094 mutex_unlock(&list->hdev->debug_list_lock); 1097 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
1095 kfree(list->hid_debug_buf); 1098 kfree(list->hid_debug_buf);
1096 kfree(list); 1099 kfree(list);
1097 1100
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index 9b0efb0083fe..d16491192112 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -18,7 +18,8 @@
18 18
19#include "hid-ids.h" 19#include "hid-ids.h"
20 20
21#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 21#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
22 (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
22#define SRWS1_NUMBER_LEDS 15 23#define SRWS1_NUMBER_LEDS 15
23struct steelseries_srws1_data { 24struct steelseries_srws1_data {
24 __u16 led_state; 25 __u16 led_state;
@@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = {
1070xC0 /* End Collection */ 1080xC0 /* End Collection */
108}; 109};
109 110
110#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 111#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
112 (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
111static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds) 113static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
112{ 114{
113 struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list; 115 struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
370static struct hid_driver steelseries_srws1_driver = { 372static struct hid_driver steelseries_srws1_driver = {
371 .name = "steelseries_srws1", 373 .name = "steelseries_srws1",
372 .id_table = steelseries_srws1_devices, 374 .id_table = steelseries_srws1_devices,
373#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 375#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
376 (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
374 .probe = steelseries_srws1_probe, 377 .probe = steelseries_srws1_probe,
375 .remove = steelseries_srws1_remove, 378 .remove = steelseries_srws1_remove,
376#endif 379#endif
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0e8fab1913df..fa6964d8681a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -273,6 +273,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
273 .target_residency = 500, 273 .target_residency = 500,
274 .enter = &intel_idle }, 274 .enter = &intel_idle },
275 { 275 {
276 .name = "C8-HSW",
277 .desc = "MWAIT 0x40",
278 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
279 .exit_latency = 300,
280 .target_residency = 900,
281 .enter = &intel_idle },
282 {
283 .name = "C9-HSW",
284 .desc = "MWAIT 0x50",
285 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
286 .exit_latency = 600,
287 .target_residency = 1800,
288 .enter = &intel_idle },
289 {
290 .name = "C10-HSW",
291 .desc = "MWAIT 0x60",
292 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
293 .exit_latency = 2600,
294 .target_residency = 7700,
295 .enter = &intel_idle },
296 {
276 .enter = NULL } 297 .enter = NULL }
277}; 298};
278 299
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 699187ab3800..5b9ac32801c7 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
1002 kill_guest(&lg->cpus[0], 1002 kill_guest(&lg->cpus[0],
1003 "Cannot populate switcher mapping"); 1003 "Cannot populate switcher mapping");
1004 } 1004 }
1005 lg->pgdirs[pgdir].last_host_cpu = -1;
1005 } 1006 }
1006} 1007}
1007 1008
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c6083132c4b8..0387e05cdb98 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
319static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, 319static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
320 enum data_mode *data_mode) 320 enum data_mode *data_mode)
321{ 321{
322 unsigned noio_flag;
323 void *ptr;
324
322 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { 325 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
323 *data_mode = DATA_MODE_SLAB; 326 *data_mode = DATA_MODE_SLAB;
324 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); 327 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
332 } 335 }
333 336
334 *data_mode = DATA_MODE_VMALLOC; 337 *data_mode = DATA_MODE_VMALLOC;
335 return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); 338
339 /*
340 * __vmalloc allocates the data pages and auxiliary structures with
341 * gfp_flags that were specified, but pagetables are always allocated
342 * with GFP_KERNEL, no matter what was specified as gfp_mask.
343 *
344 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
345 * all allocations done by this process (including pagetables) are done
346 * as if GFP_NOIO was specified.
347 */
348
349 if (gfp_mask & __GFP_NORETRY)
350 noio_flag = memalloc_noio_save();
351
352 ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
353
354 if (gfp_mask & __GFP_NORETRY)
355 memalloc_noio_restore(noio_flag);
356
357 return ptr;
336} 358}
337 359
338/* 360/*
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 83e995fece88..1af7255bbffb 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
1044 struct dm_cache_statistics *stats) 1044 struct dm_cache_statistics *stats)
1045{ 1045{
1046 down_read(&cmd->root_lock); 1046 down_read(&cmd->root_lock);
1047 memcpy(stats, &cmd->stats, sizeof(*stats)); 1047 *stats = cmd->stats;
1048 up_read(&cmd->root_lock); 1048 up_read(&cmd->root_lock);
1049} 1049}
1050 1050
@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
1052 struct dm_cache_statistics *stats) 1052 struct dm_cache_statistics *stats)
1053{ 1053{
1054 down_write(&cmd->root_lock); 1054 down_write(&cmd->root_lock);
1055 memcpy(&cmd->stats, stats, sizeof(*stats)); 1055 cmd->stats = *stats;
1056 up_write(&cmd->root_lock); 1056 up_write(&cmd->root_lock);
1057} 1057}
1058 1058
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 558bdfdabf5f..33369ca9614f 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -130,8 +130,8 @@ struct dm_cache_policy {
130 * 130 *
131 * Must not block. 131 * Must not block.
132 * 132 *
133 * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK 133 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
134 * would be typical). 134 * (-EWOULDBLOCK would be typical).
135 */ 135 */
136 int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock); 136 int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
137 137
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 10744091e6ca..df44b60e66f2 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -205,7 +205,7 @@ struct per_bio_data {
205 /* 205 /*
206 * writethrough fields. These MUST remain at the end of this 206 * writethrough fields. These MUST remain at the end of this
207 * structure and the 'cache' member must be the first as it 207 * structure and the 'cache' member must be the first as it
208 * is used to determine the offsetof the writethrough fields. 208 * is used to determine the offset of the writethrough fields.
209 */ 209 */
210 struct cache *cache; 210 struct cache *cache;
211 dm_cblock_t cblock; 211 dm_cblock_t cblock;
@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
393 return r; 393 return r;
394} 394}
395 395
396 /*----------------------------------------------------------------*/ 396/*----------------------------------------------------------------*/
397 397
398static bool is_dirty(struct cache *cache, dm_cblock_t b) 398static bool is_dirty(struct cache *cache, dm_cblock_t b)
399{ 399{
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
419} 419}
420 420
421/*----------------------------------------------------------------*/ 421/*----------------------------------------------------------------*/
422
422static bool block_size_is_power_of_two(struct cache *cache) 423static bool block_size_is_power_of_two(struct cache *cache)
423{ 424{
424 return cache->sectors_per_block_shift >= 0; 425 return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)
667 668
668 /* 669 /*
669 * We can't issue this bio directly, since we're in interrupt 670 * We can't issue this bio directly, since we're in interrupt
670 * context. So it get's put on a bio list for processing by the 671 * context. So it gets put on a bio list for processing by the
671 * worker thread. 672 * worker thread.
672 */ 673 */
673 defer_writethrough_bio(pb->cache, bio); 674 defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
1445static void do_waker(struct work_struct *ws) 1446static void do_waker(struct work_struct *ws)
1446{ 1447{
1447 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); 1448 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1449 policy_tick(cache->policy);
1448 wake_worker(cache); 1450 wake_worker(cache);
1449 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); 1451 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1450} 1452}
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
1809 1811
1810static struct kmem_cache *migration_cache; 1812static struct kmem_cache *migration_cache;
1811 1813
1812static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv) 1814#define NOT_CORE_OPTION 1
1815
1816static int process_config_option(struct cache *cache, const char *key, const char *value)
1817{
1818 unsigned long tmp;
1819
1820 if (!strcasecmp(key, "migration_threshold")) {
1821 if (kstrtoul(value, 10, &tmp))
1822 return -EINVAL;
1823
1824 cache->migration_threshold = tmp;
1825 return 0;
1826 }
1827
1828 return NOT_CORE_OPTION;
1829}
1830
1831static int set_config_value(struct cache *cache, const char *key, const char *value)
1832{
1833 int r = process_config_option(cache, key, value);
1834
1835 if (r == NOT_CORE_OPTION)
1836 r = policy_set_config_value(cache->policy, key, value);
1837
1838 if (r)
1839 DMWARN("bad config value for %s: %s", key, value);
1840
1841 return r;
1842}
1843
1844static int set_config_values(struct cache *cache, int argc, const char **argv)
1813{ 1845{
1814 int r = 0; 1846 int r = 0;
1815 1847
@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
1819 } 1851 }
1820 1852
1821 while (argc) { 1853 while (argc) {
1822 r = policy_set_config_value(p, argv[0], argv[1]); 1854 r = set_config_value(cache, argv[0], argv[1]);
1823 if (r) { 1855 if (r)
1824 DMWARN("policy_set_config_value failed: key = '%s', value = '%s'", 1856 break;
1825 argv[0], argv[1]);
1826 return r;
1827 }
1828 1857
1829 argc -= 2; 1858 argc -= 2;
1830 argv += 2; 1859 argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
1836static int create_cache_policy(struct cache *cache, struct cache_args *ca, 1865static int create_cache_policy(struct cache *cache, struct cache_args *ca,
1837 char **error) 1866 char **error)
1838{ 1867{
1839 int r;
1840
1841 cache->policy = dm_cache_policy_create(ca->policy_name, 1868 cache->policy = dm_cache_policy_create(ca->policy_name,
1842 cache->cache_size, 1869 cache->cache_size,
1843 cache->origin_sectors, 1870 cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
1847 return -ENOMEM; 1874 return -ENOMEM;
1848 } 1875 }
1849 1876
1850 r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); 1877 return 0;
1851 if (r) {
1852 *error = "Error setting cache policy's config values";
1853 dm_cache_policy_destroy(cache->policy);
1854 cache->policy = NULL;
1855 }
1856
1857 return r;
1858} 1878}
1859 1879
1860/* 1880/*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
1886 return discard_block_size; 1906 return discard_block_size;
1887} 1907}
1888 1908
1889#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) 1909#define DEFAULT_MIGRATION_THRESHOLD 2048
1890 1910
1891static int cache_create(struct cache_args *ca, struct cache **result) 1911static int cache_create(struct cache_args *ca, struct cache **result)
1892{ 1912{
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1911 ti->discards_supported = true; 1931 ti->discards_supported = true;
1912 ti->discard_zeroes_data_unsupported = true; 1932 ti->discard_zeroes_data_unsupported = true;
1913 1933
1914 memcpy(&cache->features, &ca->features, sizeof(cache->features)); 1934 cache->features = ca->features;
1915 ti->per_bio_data_size = get_per_bio_data_size(cache); 1935 ti->per_bio_data_size = get_per_bio_data_size(cache);
1916 1936
1917 cache->callbacks.congested_fn = cache_is_congested; 1937 cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1948 r = create_cache_policy(cache, ca, error); 1968 r = create_cache_policy(cache, ca, error);
1949 if (r) 1969 if (r)
1950 goto bad; 1970 goto bad;
1971
1951 cache->policy_nr_args = ca->policy_argc; 1972 cache->policy_nr_args = ca->policy_argc;
1973 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
1974
1975 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
1976 if (r) {
1977 *error = "Error setting cache policy's config values";
1978 goto bad;
1979 }
1952 1980
1953 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, 1981 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
1954 ca->block_size, may_format, 1982 ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1967 INIT_LIST_HEAD(&cache->quiesced_migrations); 1995 INIT_LIST_HEAD(&cache->quiesced_migrations);
1968 INIT_LIST_HEAD(&cache->completed_migrations); 1996 INIT_LIST_HEAD(&cache->completed_migrations);
1969 INIT_LIST_HEAD(&cache->need_commit_migrations); 1997 INIT_LIST_HEAD(&cache->need_commit_migrations);
1970 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
1971 atomic_set(&cache->nr_migrations, 0); 1998 atomic_set(&cache->nr_migrations, 0);
1972 init_waitqueue_head(&cache->migration_wait); 1999 init_waitqueue_head(&cache->migration_wait);
1973 2000
2001 r = -ENOMEM;
1974 cache->nr_dirty = 0; 2002 cache->nr_dirty = 0;
1975 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); 2003 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
1976 if (!cache->dirty_bitset) { 2004 if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@ err:
2517 DMEMIT("Error"); 2545 DMEMIT("Error");
2518} 2546}
2519 2547
2520#define NOT_CORE_OPTION 1
2521
2522static int process_config_option(struct cache *cache, char **argv)
2523{
2524 unsigned long tmp;
2525
2526 if (!strcasecmp(argv[0], "migration_threshold")) {
2527 if (kstrtoul(argv[1], 10, &tmp))
2528 return -EINVAL;
2529
2530 cache->migration_threshold = tmp;
2531 return 0;
2532 }
2533
2534 return NOT_CORE_OPTION;
2535}
2536
2537/* 2548/*
2538 * Supports <key> <value>. 2549 * Supports <key> <value>.
2539 * 2550 *
@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
2541 */ 2552 */
2542static int cache_message(struct dm_target *ti, unsigned argc, char **argv) 2553static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2543{ 2554{
2544 int r;
2545 struct cache *cache = ti->private; 2555 struct cache *cache = ti->private;
2546 2556
2547 if (argc != 2) 2557 if (argc != 2)
2548 return -EINVAL; 2558 return -EINVAL;
2549 2559
2550 r = process_config_option(cache, argv); 2560 return set_config_value(cache, argv[0], argv[1]);
2551 if (r == NOT_CORE_OPTION)
2552 return policy_set_config_value(cache->policy, argv[0], argv[1]);
2553
2554 return r;
2555} 2561}
2556 2562
2557static int cache_iterate_devices(struct dm_target *ti, 2563static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
2609 2615
2610static struct target_type cache_target = { 2616static struct target_type cache_target = {
2611 .name = "cache", 2617 .name = "cache",
2612 .version = {1, 1, 0}, 2618 .version = {1, 1, 1},
2613 .module = THIS_MODULE, 2619 .module = THIS_MODULE,
2614 .ctr = cache_ctr, 2620 .ctr = cache_ctr,
2615 .dtr = cache_dtr, 2621 .dtr = cache_dtr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 51bb81676be3..bdf26f5bd326 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
907 907
908 ti->num_flush_bios = 1; 908 ti->num_flush_bios = 1;
909 ti->num_discard_bios = 1; 909 ti->num_discard_bios = 1;
910 ti->num_write_same_bios = 1;
910 911
911 return 0; 912 return 0;
912 913
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0e07026a8d1..c434e5aab2df 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1121 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); 1121 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1122 if (!s->pending_pool) { 1122 if (!s->pending_pool) {
1123 ti->error = "Could not allocate mempool for pending exceptions"; 1123 ti->error = "Could not allocate mempool for pending exceptions";
1124 r = -ENOMEM;
1124 goto bad_pending_pool; 1125 goto bad_pending_pool;
1125 } 1126 }
1126 1127
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index ea5e878a30b9..d907ca6227ce 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
94static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) 94static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
95{ 95{
96 struct stripe_c *sc; 96 struct stripe_c *sc;
97 sector_t width; 97 sector_t width, tmp_len;
98 uint32_t stripes; 98 uint32_t stripes;
99 uint32_t chunk_size; 99 uint32_t chunk_size;
100 int r; 100 int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
116 } 116 }
117 117
118 width = ti->len; 118 width = ti->len;
119 if (sector_div(width, chunk_size)) { 119 if (sector_div(width, stripes)) {
120 ti->error = "Target length not divisible by " 120 ti->error = "Target length not divisible by "
121 "chunk size"; 121 "number of stripes";
122 return -EINVAL; 122 return -EINVAL;
123 } 123 }
124 124
125 if (sector_div(width, stripes)) { 125 tmp_len = width;
126 if (sector_div(tmp_len, chunk_size)) {
126 ti->error = "Target length not divisible by " 127 ti->error = "Target length not divisible by "
127 "number of stripes"; 128 "chunk size";
128 return -EINVAL; 129 return -EINVAL;
129 } 130 }
130 131
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e50dad0c65f4..1ff252ab7d46 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
1442 return false; 1442 return false;
1443 1443
1444 if (!ti->type->iterate_devices || 1444 if (!ti->type->iterate_devices ||
1445 !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) 1445 ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1446 return false; 1446 return false;
1447 } 1447 }
1448 1448
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 00cee02f8fc9..60bce435f4fa 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
1645 return r; 1645 return r;
1646} 1646}
1647 1647
1648static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) 1648static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
1649{ 1649{
1650 int r; 1650 int r;
1651 dm_block_t old_count; 1651 dm_block_t old_count;
1652 1652
1653 r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count); 1653 r = dm_sm_get_nr_blocks(sm, &old_count);
1654 if (r) 1654 if (r)
1655 return r; 1655 return r;
1656 1656
@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
1658 return 0; 1658 return 0;
1659 1659
1660 if (new_count < old_count) { 1660 if (new_count < old_count) {
1661 DMERR("cannot reduce size of data device"); 1661 DMERR("cannot reduce size of space map");
1662 return -EINVAL; 1662 return -EINVAL;
1663 } 1663 }
1664 1664
1665 return dm_sm_extend(pmd->data_sm, new_count - old_count); 1665 return dm_sm_extend(sm, new_count - old_count);
1666} 1666}
1667 1667
1668int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) 1668int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
1671 1671
1672 down_write(&pmd->root_lock); 1672 down_write(&pmd->root_lock);
1673 if (!pmd->fail_io) 1673 if (!pmd->fail_io)
1674 r = __resize_data_dev(pmd, new_count); 1674 r = __resize_space_map(pmd->data_sm, new_count);
1675 up_write(&pmd->root_lock);
1676
1677 return r;
1678}
1679
1680int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
1681{
1682 int r = -EINVAL;
1683
1684 down_write(&pmd->root_lock);
1685 if (!pmd->fail_io)
1686 r = __resize_space_map(pmd->metadata_sm, new_count);
1675 up_write(&pmd->root_lock); 1687 up_write(&pmd->root_lock);
1676 1688
1677 return r; 1689 return r;
@@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
1684 dm_bm_set_read_only(pmd->bm); 1696 dm_bm_set_read_only(pmd->bm);
1685 up_write(&pmd->root_lock); 1697 up_write(&pmd->root_lock);
1686} 1698}
1699
1700int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
1701 dm_block_t threshold,
1702 dm_sm_threshold_fn fn,
1703 void *context)
1704{
1705 int r;
1706
1707 down_write(&pmd->root_lock);
1708 r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
1709 up_write(&pmd->root_lock);
1710
1711 return r;
1712}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 0cecc3702885..845ebbe589a9 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -8,6 +8,7 @@
8#define DM_THIN_METADATA_H 8#define DM_THIN_METADATA_H
9 9
10#include "persistent-data/dm-block-manager.h" 10#include "persistent-data/dm-block-manager.h"
11#include "persistent-data/dm-space-map.h"
11 12
12#define THIN_METADATA_BLOCK_SIZE 4096 13#define THIN_METADATA_BLOCK_SIZE 4096
13 14
@@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
185 * blocks would be lost. 186 * blocks would be lost.
186 */ 187 */
187int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); 188int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
189int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
188 190
189/* 191/*
190 * Flicks the underlying block manager into read only mode, so you know 192 * Flicks the underlying block manager into read only mode, so you know
@@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
192 */ 194 */
193void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); 195void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
194 196
197int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
198 dm_block_t threshold,
199 dm_sm_threshold_fn fn,
200 void *context);
201
195/*----------------------------------------------------------------*/ 202/*----------------------------------------------------------------*/
196 203
197#endif 204#endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 004ad1652b73..759cffc45cab 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
922 return r; 922 return r;
923 923
924 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { 924 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
925 DMWARN("%s: reached low water mark, sending event.", 925 DMWARN("%s: reached low water mark for data device: sending event.",
926 dm_device_name(pool->pool_md)); 926 dm_device_name(pool->pool_md));
927 spin_lock_irqsave(&pool->lock, flags); 927 spin_lock_irqsave(&pool->lock, flags);
928 pool->low_water_triggered = 1; 928 pool->low_water_triggered = 1;
@@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1281 bio_io_error(bio); 1281 bio_io_error(bio);
1282} 1282}
1283 1283
1284/*
1285 * FIXME: should we also commit due to size of transaction, measured in
1286 * metadata blocks?
1287 */
1284static int need_commit_due_to_time(struct pool *pool) 1288static int need_commit_due_to_time(struct pool *pool)
1285{ 1289{
1286 return jiffies < pool->last_commit_jiffies || 1290 return jiffies < pool->last_commit_jiffies ||
@@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1909 return r; 1913 return r;
1910} 1914}
1911 1915
1916static void metadata_low_callback(void *context)
1917{
1918 struct pool *pool = context;
1919
1920 DMWARN("%s: reached low water mark for metadata device: sending event.",
1921 dm_device_name(pool->pool_md));
1922
1923 dm_table_event(pool->ti->table);
1924}
1925
1926static sector_t get_metadata_dev_size(struct block_device *bdev)
1927{
1928 sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
1929 char buffer[BDEVNAME_SIZE];
1930
1931 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
1932 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1933 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
1934 metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
1935 }
1936
1937 return metadata_dev_size;
1938}
1939
1940static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
1941{
1942 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
1943
1944 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
1945
1946 return metadata_dev_size;
1947}
1948
1949/*
1950 * When a metadata threshold is crossed a dm event is triggered, and
1951 * userland should respond by growing the metadata device. We could let
1952 * userland set the threshold, like we do with the data threshold, but I'm
1953 * not sure they know enough to do this well.
1954 */
1955static dm_block_t calc_metadata_threshold(struct pool_c *pt)
1956{
1957 /*
1958 * 4M is ample for all ops with the possible exception of thin
1959 * device deletion which is harmless if it fails (just retry the
1960 * delete after you've grown the device).
1961 */
1962 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
1963 return min((dm_block_t)1024ULL /* 4M */, quarter);
1964}
1965
1912/* 1966/*
1913 * thin-pool <metadata dev> <data dev> 1967 * thin-pool <metadata dev> <data dev>
1914 * <data block size (sectors)> 1968 * <data block size (sectors)>
@@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1931 unsigned long block_size; 1985 unsigned long block_size;
1932 dm_block_t low_water_blocks; 1986 dm_block_t low_water_blocks;
1933 struct dm_dev *metadata_dev; 1987 struct dm_dev *metadata_dev;
1934 sector_t metadata_dev_size; 1988 fmode_t metadata_mode;
1935 char b[BDEVNAME_SIZE];
1936 1989
1937 /* 1990 /*
1938 * FIXME Remove validation from scope of lock. 1991 * FIXME Remove validation from scope of lock.
@@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1944 r = -EINVAL; 1997 r = -EINVAL;
1945 goto out_unlock; 1998 goto out_unlock;
1946 } 1999 }
2000
1947 as.argc = argc; 2001 as.argc = argc;
1948 as.argv = argv; 2002 as.argv = argv;
1949 2003
1950 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev); 2004 /*
2005 * Set default pool features.
2006 */
2007 pool_features_init(&pf);
2008
2009 dm_consume_args(&as, 4);
2010 r = parse_pool_features(&as, &pf, ti);
2011 if (r)
2012 goto out_unlock;
2013
2014 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2015 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
1951 if (r) { 2016 if (r) {
1952 ti->error = "Error opening metadata block device"; 2017 ti->error = "Error opening metadata block device";
1953 goto out_unlock; 2018 goto out_unlock;
1954 } 2019 }
1955 2020
1956 metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT; 2021 /*
1957 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) 2022 * Run for the side-effect of possibly issuing a warning if the
1958 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", 2023 * device is too big.
1959 bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS); 2024 */
2025 (void) get_metadata_dev_size(metadata_dev->bdev);
1960 2026
1961 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); 2027 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1962 if (r) { 2028 if (r) {
@@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1979 goto out; 2045 goto out;
1980 } 2046 }
1981 2047
1982 /*
1983 * Set default pool features.
1984 */
1985 pool_features_init(&pf);
1986
1987 dm_consume_args(&as, 4);
1988 r = parse_pool_features(&as, &pf, ti);
1989 if (r)
1990 goto out;
1991
1992 pt = kzalloc(sizeof(*pt), GFP_KERNEL); 2048 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1993 if (!pt) { 2049 if (!pt) {
1994 r = -ENOMEM; 2050 r = -ENOMEM;
@@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2040 } 2096 }
2041 ti->private = pt; 2097 ti->private = pt;
2042 2098
2099 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2100 calc_metadata_threshold(pt),
2101 metadata_low_callback,
2102 pool);
2103 if (r)
2104 goto out_free_pt;
2105
2043 pt->callbacks.congested_fn = pool_is_congested; 2106 pt->callbacks.congested_fn = pool_is_congested;
2044 dm_table_add_target_callbacks(ti->table, &pt->callbacks); 2107 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2045 2108
@@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
2079 return r; 2142 return r;
2080} 2143}
2081 2144
2082/* 2145static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2083 * Retrieves the number of blocks of the data device from
2084 * the superblock and compares it to the actual device size,
2085 * thus resizing the data device in case it has grown.
2086 *
2087 * This both copes with opening preallocated data devices in the ctr
2088 * being followed by a resume
2089 * -and-
2090 * calling the resume method individually after userspace has
2091 * grown the data device in reaction to a table event.
2092 */
2093static int pool_preresume(struct dm_target *ti)
2094{ 2146{
2095 int r; 2147 int r;
2096 struct pool_c *pt = ti->private; 2148 struct pool_c *pt = ti->private;
@@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti)
2098 sector_t data_size = ti->len; 2150 sector_t data_size = ti->len;
2099 dm_block_t sb_data_size; 2151 dm_block_t sb_data_size;
2100 2152
2101 /* 2153 *need_commit = false;
2102 * Take control of the pool object.
2103 */
2104 r = bind_control_target(pool, ti);
2105 if (r)
2106 return r;
2107 2154
2108 (void) sector_div(data_size, pool->sectors_per_block); 2155 (void) sector_div(data_size, pool->sectors_per_block);
2109 2156
@@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti)
2114 } 2161 }
2115 2162
2116 if (data_size < sb_data_size) { 2163 if (data_size < sb_data_size) {
2117 DMERR("pool target too small, is %llu blocks (expected %llu)", 2164 DMERR("pool target (%llu blocks) too small: expected %llu",
2118 (unsigned long long)data_size, sb_data_size); 2165 (unsigned long long)data_size, sb_data_size);
2119 return -EINVAL; 2166 return -EINVAL;
2120 2167
@@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti)
2122 r = dm_pool_resize_data_dev(pool->pmd, data_size); 2169 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2123 if (r) { 2170 if (r) {
2124 DMERR("failed to resize data device"); 2171 DMERR("failed to resize data device");
2125 /* FIXME Stricter than necessary: Rollback transaction instead here */
2126 set_pool_mode(pool, PM_READ_ONLY); 2172 set_pool_mode(pool, PM_READ_ONLY);
2127 return r; 2173 return r;
2128 } 2174 }
2129 2175
2130 (void) commit_or_fallback(pool); 2176 *need_commit = true;
2131 } 2177 }
2132 2178
2133 return 0; 2179 return 0;
2134} 2180}
2135 2181
2182static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2183{
2184 int r;
2185 struct pool_c *pt = ti->private;
2186 struct pool *pool = pt->pool;
2187 dm_block_t metadata_dev_size, sb_metadata_dev_size;
2188
2189 *need_commit = false;
2190
2191 metadata_dev_size = get_metadata_dev_size(pool->md_dev);
2192
2193 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2194 if (r) {
2195 DMERR("failed to retrieve data device size");
2196 return r;
2197 }
2198
2199 if (metadata_dev_size < sb_metadata_dev_size) {
2200 DMERR("metadata device (%llu sectors) too small: expected %llu",
2201 metadata_dev_size, sb_metadata_dev_size);
2202 return -EINVAL;
2203
2204 } else if (metadata_dev_size > sb_metadata_dev_size) {
2205 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2206 if (r) {
2207 DMERR("failed to resize metadata device");
2208 return r;
2209 }
2210
2211 *need_commit = true;
2212 }
2213
2214 return 0;
2215}
2216
2217/*
2218 * Retrieves the number of blocks of the data device from
2219 * the superblock and compares it to the actual device size,
2220 * thus resizing the data device in case it has grown.
2221 *
2222 * This both copes with opening preallocated data devices in the ctr
2223 * being followed by a resume
2224 * -and-
2225 * calling the resume method individually after userspace has
2226 * grown the data device in reaction to a table event.
2227 */
2228static int pool_preresume(struct dm_target *ti)
2229{
2230 int r;
2231 bool need_commit1, need_commit2;
2232 struct pool_c *pt = ti->private;
2233 struct pool *pool = pt->pool;
2234
2235 /*
2236 * Take control of the pool object.
2237 */
2238 r = bind_control_target(pool, ti);
2239 if (r)
2240 return r;
2241
2242 r = maybe_resize_data_dev(ti, &need_commit1);
2243 if (r)
2244 return r;
2245
2246 r = maybe_resize_metadata_dev(ti, &need_commit2);
2247 if (r)
2248 return r;
2249
2250 if (need_commit1 || need_commit2)
2251 (void) commit_or_fallback(pool);
2252
2253 return 0;
2254}
2255
2136static void pool_resume(struct dm_target *ti) 2256static void pool_resume(struct dm_target *ti)
2137{ 2257{
2138 struct pool_c *pt = ti->private; 2258 struct pool_c *pt = ti->private;
@@ -2549,7 +2669,7 @@ static struct target_type pool_target = {
2549 .name = "thin-pool", 2669 .name = "thin-pool",
2550 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2670 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2551 DM_TARGET_IMMUTABLE, 2671 DM_TARGET_IMMUTABLE,
2552 .version = {1, 7, 0}, 2672 .version = {1, 8, 0},
2553 .module = THIS_MODULE, 2673 .module = THIS_MODULE,
2554 .ctr = pool_ctr, 2674 .ctr = pool_ctr,
2555 .dtr = pool_dtr, 2675 .dtr = pool_dtr,
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index f6d29e614ab7..e735a6d5a793 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -248,7 +248,8 @@ static struct dm_space_map ops = {
248 .new_block = sm_disk_new_block, 248 .new_block = sm_disk_new_block,
249 .commit = sm_disk_commit, 249 .commit = sm_disk_commit,
250 .root_size = sm_disk_root_size, 250 .root_size = sm_disk_root_size,
251 .copy_root = sm_disk_copy_root 251 .copy_root = sm_disk_copy_root,
252 .register_threshold_callback = NULL
252}; 253};
253 254
254struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, 255struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 906cf3df71af..1c959684caef 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -17,6 +17,55 @@
17/*----------------------------------------------------------------*/ 17/*----------------------------------------------------------------*/
18 18
19/* 19/*
20 * An edge triggered threshold.
21 */
22struct threshold {
23 bool threshold_set;
24 bool value_set;
25 dm_block_t threshold;
26 dm_block_t current_value;
27 dm_sm_threshold_fn fn;
28 void *context;
29};
30
31static void threshold_init(struct threshold *t)
32{
33 t->threshold_set = false;
34 t->value_set = false;
35}
36
37static void set_threshold(struct threshold *t, dm_block_t value,
38 dm_sm_threshold_fn fn, void *context)
39{
40 t->threshold_set = true;
41 t->threshold = value;
42 t->fn = fn;
43 t->context = context;
44}
45
46static bool below_threshold(struct threshold *t, dm_block_t value)
47{
48 return t->threshold_set && value <= t->threshold;
49}
50
51static bool threshold_already_triggered(struct threshold *t)
52{
53 return t->value_set && below_threshold(t, t->current_value);
54}
55
56static void check_threshold(struct threshold *t, dm_block_t value)
57{
58 if (below_threshold(t, value) &&
59 !threshold_already_triggered(t))
60 t->fn(t->context);
61
62 t->value_set = true;
63 t->current_value = value;
64}
65
66/*----------------------------------------------------------------*/
67
68/*
20 * Space map interface. 69 * Space map interface.
21 * 70 *
22 * The low level disk format is written using the standard btree and 71 * The low level disk format is written using the standard btree and
@@ -54,6 +103,8 @@ struct sm_metadata {
54 unsigned allocated_this_transaction; 103 unsigned allocated_this_transaction;
55 unsigned nr_uncommitted; 104 unsigned nr_uncommitted;
56 struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; 105 struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
106
107 struct threshold threshold;
57}; 108};
58 109
59static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) 110static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
@@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
144 kfree(smm); 195 kfree(smm);
145} 196}
146 197
147static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
148{
149 DMERR("doesn't support extend");
150 return -EINVAL;
151}
152
153static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count) 198static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
154{ 199{
155 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 200 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
@@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
335 380
336static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) 381static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
337{ 382{
383 dm_block_t count;
384 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
385
338 int r = sm_metadata_new_block_(sm, b); 386 int r = sm_metadata_new_block_(sm, b);
339 if (r) 387 if (r)
340 DMERR("unable to allocate new metadata block"); 388 DMERR("unable to allocate new metadata block");
389
390 r = sm_metadata_get_nr_free(sm, &count);
391 if (r)
392 DMERR("couldn't get free block count");
393
394 check_threshold(&smm->threshold, count);
395
341 return r; 396 return r;
342} 397}
343 398
@@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm)
357 return 0; 412 return 0;
358} 413}
359 414
415static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
416 dm_block_t threshold,
417 dm_sm_threshold_fn fn,
418 void *context)
419{
420 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
421
422 set_threshold(&smm->threshold, threshold, fn, context);
423
424 return 0;
425}
426
360static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result) 427static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
361{ 428{
362 *result = sizeof(struct disk_sm_root); 429 *result = sizeof(struct disk_sm_root);
@@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t
382 return 0; 449 return 0;
383} 450}
384 451
452static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
453
385static struct dm_space_map ops = { 454static struct dm_space_map ops = {
386 .destroy = sm_metadata_destroy, 455 .destroy = sm_metadata_destroy,
387 .extend = sm_metadata_extend, 456 .extend = sm_metadata_extend,
@@ -395,7 +464,8 @@ static struct dm_space_map ops = {
395 .new_block = sm_metadata_new_block, 464 .new_block = sm_metadata_new_block,
396 .commit = sm_metadata_commit, 465 .commit = sm_metadata_commit,
397 .root_size = sm_metadata_root_size, 466 .root_size = sm_metadata_root_size,
398 .copy_root = sm_metadata_copy_root 467 .copy_root = sm_metadata_copy_root,
468 .register_threshold_callback = sm_metadata_register_threshold_callback
399}; 469};
400 470
401/*----------------------------------------------------------------*/ 471/*----------------------------------------------------------------*/
@@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm)
410 480
411static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks) 481static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
412{ 482{
413 DMERR("boostrap doesn't support extend"); 483 DMERR("bootstrap doesn't support extend");
414 484
415 return -EINVAL; 485 return -EINVAL;
416} 486}
@@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
450static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b, 520static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
451 uint32_t count) 521 uint32_t count)
452{ 522{
453 DMERR("boostrap doesn't support set_count"); 523 DMERR("bootstrap doesn't support set_count");
454 524
455 return -EINVAL; 525 return -EINVAL;
456} 526}
@@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm)
491 561
492static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) 562static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
493{ 563{
494 DMERR("boostrap doesn't support root_size"); 564 DMERR("bootstrap doesn't support root_size");
495 565
496 return -EINVAL; 566 return -EINVAL;
497} 567}
@@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
499static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where, 569static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
500 size_t max) 570 size_t max)
501{ 571{
502 DMERR("boostrap doesn't support copy_root"); 572 DMERR("bootstrap doesn't support copy_root");
503 573
504 return -EINVAL; 574 return -EINVAL;
505} 575}
@@ -517,11 +587,42 @@ static struct dm_space_map bootstrap_ops = {
517 .new_block = sm_bootstrap_new_block, 587 .new_block = sm_bootstrap_new_block,
518 .commit = sm_bootstrap_commit, 588 .commit = sm_bootstrap_commit,
519 .root_size = sm_bootstrap_root_size, 589 .root_size = sm_bootstrap_root_size,
520 .copy_root = sm_bootstrap_copy_root 590 .copy_root = sm_bootstrap_copy_root,
591 .register_threshold_callback = NULL
521}; 592};
522 593
523/*----------------------------------------------------------------*/ 594/*----------------------------------------------------------------*/
524 595
596static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
597{
598 int r, i;
599 enum allocation_event ev;
600 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
601 dm_block_t old_len = smm->ll.nr_blocks;
602
603 /*
604 * Flick into a mode where all blocks get allocated in the new area.
605 */
606 smm->begin = old_len;
607 memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
608
609 /*
610 * Extend.
611 */
612 r = sm_ll_extend(&smm->ll, extra_blocks);
613
614 /*
615 * Switch back to normal behaviour.
616 */
617 memcpy(&smm->sm, &ops, sizeof(smm->sm));
618 for (i = old_len; !r && i < smm->begin; i++)
619 r = sm_ll_inc(&smm->ll, i, &ev);
620
621 return r;
622}
623
624/*----------------------------------------------------------------*/
625
525struct dm_space_map *dm_sm_metadata_init(void) 626struct dm_space_map *dm_sm_metadata_init(void)
526{ 627{
527 struct sm_metadata *smm; 628 struct sm_metadata *smm;
@@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
549 smm->recursion_count = 0; 650 smm->recursion_count = 0;
550 smm->allocated_this_transaction = 0; 651 smm->allocated_this_transaction = 0;
551 smm->nr_uncommitted = 0; 652 smm->nr_uncommitted = 0;
653 threshold_init(&smm->threshold);
552 654
553 memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); 655 memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
554 656
@@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
590 smm->recursion_count = 0; 692 smm->recursion_count = 0;
591 smm->allocated_this_transaction = 0; 693 smm->allocated_this_transaction = 0;
592 smm->nr_uncommitted = 0; 694 smm->nr_uncommitted = 0;
695 threshold_init(&smm->threshold);
593 696
594 memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); 697 memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
595 return 0; 698 return 0;
diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
index 1cbfc6b1638a..3e6d1153b7c4 100644
--- a/drivers/md/persistent-data/dm-space-map.h
+++ b/drivers/md/persistent-data/dm-space-map.h
@@ -9,6 +9,8 @@
9 9
10#include "dm-block-manager.h" 10#include "dm-block-manager.h"
11 11
12typedef void (*dm_sm_threshold_fn)(void *context);
13
12/* 14/*
13 * struct dm_space_map keeps a record of how many times each block in a device 15 * struct dm_space_map keeps a record of how many times each block in a device
14 * is referenced. It needs to be fixed on disk as part of the transaction. 16 * is referenced. It needs to be fixed on disk as part of the transaction.
@@ -59,6 +61,15 @@ struct dm_space_map {
59 */ 61 */
60 int (*root_size)(struct dm_space_map *sm, size_t *result); 62 int (*root_size)(struct dm_space_map *sm, size_t *result);
61 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len); 63 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
64
65 /*
66 * You can register one threshold callback which is edge-triggered
67 * when the free space in the space map drops below the threshold.
68 */
69 int (*register_threshold_callback)(struct dm_space_map *sm,
70 dm_block_t threshold,
71 dm_sm_threshold_fn fn,
72 void *context);
62}; 73};
63 74
64/*----------------------------------------------------------------*/ 75/*----------------------------------------------------------------*/
@@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le
131 return sm->copy_root(sm, copy_to_here_le, len); 142 return sm->copy_root(sm, copy_to_here_le, len);
132} 143}
133 144
145static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
146 dm_block_t threshold,
147 dm_sm_threshold_fn fn,
148 void *context)
149{
150 if (sm->register_threshold_callback)
151 return sm->register_threshold_callback(sm, threshold, fn, context);
152
153 return -EINVAL;
154}
155
156
134#endif /* _LINUX_DM_SPACE_MAP_H */ 157#endif /* _LINUX_DM_SPACE_MAP_H */
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index a3a851e49321..18c0d8d1ddf7 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL");
68 68
69#if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) 69#if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
70 70
71/* The RPX series use SLOT_B */
72#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
73#define CONFIG_PCMCIA_SLOT_B
74#define CONFIG_BD_IS_MHZ
75#endif
76
77/* The ADS board use SLOT_A */ 71/* The ADS board use SLOT_A */
78#ifdef CONFIG_ADS 72#ifdef CONFIG_ADS
79#define CONFIG_PCMCIA_SLOT_A 73#define CONFIG_PCMCIA_SLOT_A
@@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev);
253 247
254#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ 248#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
255 249
256/* ------------------------------------------------------------------------- */
257/* board specific stuff: */
258/* voltage_set(), hardware_enable() and hardware_disable() */
259/* ------------------------------------------------------------------------- */
260/* RPX Boards from Embedded Planet */
261
262#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
263
264/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks.
265 * SYPCR is write once only, therefore must the slowest memory be faster
266 * than the bus monitor or we will get a machine check due to the bus timeout.
267 */
268
269#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE"
270
271#undef PCMCIA_BMT_LIMIT
272#define PCMCIA_BMT_LIMIT (6*8)
273
274static int voltage_set(int slot, int vcc, int vpp)
275{
276 u32 reg = 0;
277
278 switch (vcc) {
279 case 0:
280 break;
281 case 33:
282 reg |= BCSR1_PCVCTL4;
283 break;
284 case 50:
285 reg |= BCSR1_PCVCTL5;
286 break;
287 default:
288 return 1;
289 }
290
291 switch (vpp) {
292 case 0:
293 break;
294 case 33:
295 case 50:
296 if (vcc == vpp)
297 reg |= BCSR1_PCVCTL6;
298 else
299 return 1;
300 break;
301 case 120:
302 reg |= BCSR1_PCVCTL7;
303 default:
304 return 1;
305 }
306
307 if (!((vcc == 50) || (vcc == 0)))
308 return 1;
309
310 /* first, turn off all power */
311
312 out_be32(((u32 *) RPX_CSR_ADDR),
313 in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
314 BCSR1_PCVCTL5 |
315 BCSR1_PCVCTL6 |
316 BCSR1_PCVCTL7));
317
318 /* enable new powersettings */
319
320 out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
321
322 return 0;
323}
324
325#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
326#define hardware_enable(_slot_) /* No hardware to enable */
327#define hardware_disable(_slot_) /* No hardware to disable */
328
329#endif /* CONFIG_RPXCLASSIC */
330
331/* FADS Boards from Motorola */ 250/* FADS Boards from Motorola */
332 251
333#if defined(CONFIG_FADS) 252#if defined(CONFIG_FADS)
@@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp)
419 338
420#endif 339#endif
421 340
422/* ------------------------------------------------------------------------- */
423/* Motorola MBX860 */
424
425#if defined(CONFIG_MBX)
426
427#define PCMCIA_BOARD_MSG "MBX"
428
429static int voltage_set(int slot, int vcc, int vpp)
430{
431 u8 reg = 0;
432
433 switch (vcc) {
434 case 0:
435 break;
436 case 33:
437 reg |= CSR2_VCC_33;
438 break;
439 case 50:
440 reg |= CSR2_VCC_50;
441 break;
442 default:
443 return 1;
444 }
445
446 switch (vpp) {
447 case 0:
448 break;
449 case 33:
450 case 50:
451 if (vcc == vpp)
452 reg |= CSR2_VPP_VCC;
453 else
454 return 1;
455 break;
456 case 120:
457 if ((vcc == 33) || (vcc == 50))
458 reg |= CSR2_VPP_12;
459 else
460 return 1;
461 default:
462 return 1;
463 }
464
465 /* first, turn off all power */
466 out_8((u8 *) MBX_CSR2_ADDR,
467 in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
468
469 /* enable new powersettings */
470 out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
471
472 return 0;
473}
474
475#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
476#define hardware_enable(_slot_) /* No hardware to enable */
477#define hardware_disable(_slot_) /* No hardware to disable */
478
479#endif /* CONFIG_MBX */
480
481#if defined(CONFIG_PRxK) 341#if defined(CONFIG_PRxK)
482#include <asm/cpld.h> 342#include <asm/cpld.h>
483extern volatile fpga_pc_regs *fpga_pc; 343extern volatile fpga_pc_regs *fpga_pc;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 3338437b559b..85772616efbf 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -781,4 +781,12 @@ config APPLE_GMUX
781 graphics as well as the backlight. Currently only backlight 781 graphics as well as the backlight. Currently only backlight
782 control is supported by the driver. 782 control is supported by the driver.
783 783
784config PVPANIC
785 tristate "pvpanic device support"
786 depends on ACPI
787 ---help---
788 This driver provides support for the pvpanic device. pvpanic is
789 a paravirtualized device provided by QEMU; it lets a virtual machine
790 (guest) communicate panic events to the host.
791
784endif # X86_PLATFORM_DEVICES 792endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index ace2b38942fe..ef0ec746f78c 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
51obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o 51obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
52obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o 52obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
53obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o 53obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
54
55obj-$(CONFIG_PVPANIC) += pvpanic.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 210b5b872125..8fcb41e18b9c 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = {
171 }, 171 },
172 .driver_data = &quirk_asus_x401u, 172 .driver_data = &quirk_asus_x401u,
173 }, 173 },
174 {
175 .callback = dmi_matched,
176 .ident = "ASUSTeK COMPUTER INC. X75A",
177 .matches = {
178 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
179 DMI_MATCH(DMI_PRODUCT_NAME, "X75A"),
180 },
181 .driver_data = &quirk_asus_x401u,
182 },
174 {}, 183 {},
175}; 184};
176 185
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index fa3ee6209572..1134119521ac 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
284{ 284{
285 /* Final token is a terminator, so we don't want to copy it */ 285 /* Final token is a terminator, so we don't want to copy it */
286 int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; 286 int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
287 struct calling_interface_token *new_da_tokens;
287 struct calling_interface_structure *table = 288 struct calling_interface_structure *table =
288 container_of(dm, struct calling_interface_structure, header); 289 container_of(dm, struct calling_interface_structure, header);
289 290
@@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm)
296 da_command_address = table->cmdIOAddress; 297 da_command_address = table->cmdIOAddress;
297 da_command_code = table->cmdIOCode; 298 da_command_code = table->cmdIOCode;
298 299
299 da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * 300 new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
300 sizeof(struct calling_interface_token), 301 sizeof(struct calling_interface_token),
301 GFP_KERNEL); 302 GFP_KERNEL);
302 303
303 if (!da_tokens) 304 if (!new_da_tokens)
304 return; 305 return;
306 da_tokens = new_da_tokens;
305 307
306 memcpy(da_tokens+da_num_tokens, table->tokens, 308 memcpy(da_tokens+da_num_tokens, table->tokens,
307 sizeof(struct calling_interface_token) * tokens); 309 sizeof(struct calling_interface_token) * tokens);
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index 3f945457f71c..bcf8cc6b5537 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -34,6 +34,14 @@ MODULE_LICENSE("GPL");
34#define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" 34#define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
35#define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" 35#define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
36 36
37struct dell_wmi_event {
38 u16 length;
39 /* 0x000: A hot key pressed or an event occurred
40 * 0x00F: A sequence of hot keys are pressed */
41 u16 type;
42 u16 event[];
43};
44
37static const char *dell_wmi_aio_guids[] = { 45static const char *dell_wmi_aio_guids[] = {
38 EVENT_GUID1, 46 EVENT_GUID1,
39 EVENT_GUID2, 47 EVENT_GUID2,
@@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2);
46static const struct key_entry dell_wmi_aio_keymap[] = { 54static const struct key_entry dell_wmi_aio_keymap[] = {
47 { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, 55 { KE_KEY, 0xc0, { KEY_VOLUMEUP } },
48 { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, 56 { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
57 { KE_KEY, 0xe030, { KEY_VOLUMEUP } },
58 { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } },
59 { KE_KEY, 0xe020, { KEY_MUTE } },
60 { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
61 { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
62 { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
63 { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
49 { KE_END, 0 } 64 { KE_END, 0 }
50}; 65};
51 66
52static struct input_dev *dell_wmi_aio_input_dev; 67static struct input_dev *dell_wmi_aio_input_dev;
53 68
69/*
70 * The new WMI event data format will follow the dell_wmi_event structure
71 * So, we will check if the buffer matches the format
72 */
73static bool dell_wmi_aio_event_check(u8 *buffer, int length)
74{
75 struct dell_wmi_event *event = (struct dell_wmi_event *)buffer;
76
77 if (event == NULL || length < 6)
78 return false;
79
80 if ((event->type == 0 || event->type == 0xf) &&
81 event->length >= 2)
82 return true;
83
84 return false;
85}
86
54static void dell_wmi_aio_notify(u32 value, void *context) 87static void dell_wmi_aio_notify(u32 value, void *context)
55{ 88{
56 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; 89 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
57 union acpi_object *obj; 90 union acpi_object *obj;
91 struct dell_wmi_event *event;
58 acpi_status status; 92 acpi_status status;
59 93
60 status = wmi_get_event_data(value, &response); 94 status = wmi_get_event_data(value, &response);
@@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context)
65 99
66 obj = (union acpi_object *)response.pointer; 100 obj = (union acpi_object *)response.pointer;
67 if (obj) { 101 if (obj) {
68 unsigned int scancode; 102 unsigned int scancode = 0;
69 103
70 switch (obj->type) { 104 switch (obj->type) {
71 case ACPI_TYPE_INTEGER: 105 case ACPI_TYPE_INTEGER:
@@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context)
75 scancode, 1, true); 109 scancode, 1, true);
76 break; 110 break;
77 case ACPI_TYPE_BUFFER: 111 case ACPI_TYPE_BUFFER:
78 /* Broken machines return the scancode in a buffer */ 112 if (dell_wmi_aio_event_check(obj->buffer.pointer,
79 if (obj->buffer.pointer && obj->buffer.length > 0) { 113 obj->buffer.length)) {
80 scancode = obj->buffer.pointer[0]; 114 event = (struct dell_wmi_event *)
115 obj->buffer.pointer;
116 scancode = event->event[0];
117 } else {
118 /* Broken machines return the scancode in a
119 buffer */
120 if (obj->buffer.pointer &&
121 obj->buffer.length > 0)
122 scancode = obj->buffer.pointer[0];
123 }
124 if (scancode)
81 sparse_keymap_report_event( 125 sparse_keymap_report_event(
82 dell_wmi_aio_input_dev, 126 dell_wmi_aio_input_dev,
83 scancode, 1, true); 127 scancode, 1, true);
84 }
85 break; 128 break;
86 } 129 }
87 } 130 }
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1a779bbfb87d..8df0c5a21be2 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -71,6 +71,14 @@ enum hp_wmi_event_ids {
71 HPWMI_WIRELESS = 5, 71 HPWMI_WIRELESS = 5,
72 HPWMI_CPU_BATTERY_THROTTLE = 6, 72 HPWMI_CPU_BATTERY_THROTTLE = 6,
73 HPWMI_LOCK_SWITCH = 7, 73 HPWMI_LOCK_SWITCH = 7,
74 HPWMI_LID_SWITCH = 8,
75 HPWMI_SCREEN_ROTATION = 9,
76 HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
77 HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
78 HPWMI_PROXIMITY_SENSOR = 0x0C,
79 HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
80 HPWMI_PEAKSHIFT_PERIOD = 0x0F,
81 HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
74}; 82};
75 83
76struct bios_args { 84struct bios_args {
@@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context)
536 break; 544 break;
537 case HPWMI_LOCK_SWITCH: 545 case HPWMI_LOCK_SWITCH:
538 break; 546 break;
547 case HPWMI_LID_SWITCH:
548 break;
549 case HPWMI_SCREEN_ROTATION:
550 break;
551 case HPWMI_COOLSENSE_SYSTEM_MOBILE:
552 break;
553 case HPWMI_COOLSENSE_SYSTEM_HOT:
554 break;
555 case HPWMI_PROXIMITY_SENSOR:
556 break;
557 case HPWMI_BACKLIT_KB_BRIGHTNESS:
558 break;
559 case HPWMI_PEAKSHIFT_PERIOD:
560 break;
561 case HPWMI_BATTERY_CHARGE_PERIOD:
562 break;
539 default: 563 default:
540 pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); 564 pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
541 break; 565 break;
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index e64a7a870d42..a8e43cf70fac 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev)
362 362
363static int lis3lv02d_resume(struct device *dev) 363static int lis3lv02d_resume(struct device *dev)
364{ 364{
365 return lis3lv02d_poweron(&lis3_dev); 365 lis3lv02d_poweron(&lis3_dev);
366 return 0;
366} 367}
367 368
368static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume); 369static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 17f00b8dc5cb..89c4519d48ac 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
640 for (bit = 0; bit < 16; bit++) { 640 for (bit = 0; bit < 16; bit++) {
641 if (test_bit(bit, &value)) { 641 if (test_bit(bit, &value)) {
642 switch (bit) { 642 switch (bit) {
643 case 6: 643 case 0: /* Z580 */
644 case 6: /* Z570 */
644 /* Thermal Management button */ 645 /* Thermal Management button */
645 ideapad_input_report(priv, 65); 646 ideapad_input_report(priv, 65);
646 break; 647 break;
@@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
648 /* OneKey Theater button */ 649 /* OneKey Theater button */
649 ideapad_input_report(priv, 64); 650 ideapad_input_report(priv, 64);
650 break; 651 break;
652 default:
653 pr_info("Unknown special button: %lu\n", bit);
654 break;
651 } 655 }
652 } 656 }
653 } 657 }
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
new file mode 100644
index 000000000000..47ae0c47d4b5
--- /dev/null
+++ b/drivers/platform/x86/pvpanic.c
@@ -0,0 +1,124 @@
1/*
2 * pvpanic.c - pvpanic Device Support
3 *
4 * Copyright (C) 2013 Fujitsu.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/types.h>
27#include <acpi/acpi_bus.h>
28#include <acpi/acpi_drivers.h>
29
30MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
31MODULE_DESCRIPTION("pvpanic device driver");
32MODULE_LICENSE("GPL");
33
34static int pvpanic_add(struct acpi_device *device);
35static int pvpanic_remove(struct acpi_device *device);
36
37static const struct acpi_device_id pvpanic_device_ids[] = {
38 { "QEMU0001", 0 },
39 { "", 0 },
40};
41MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
42
43#define PVPANIC_PANICKED (1 << 0)
44
45static u16 port;
46
47static struct acpi_driver pvpanic_driver = {
48 .name = "pvpanic",
49 .class = "QEMU",
50 .ids = pvpanic_device_ids,
51 .ops = {
52 .add = pvpanic_add,
53 .remove = pvpanic_remove,
54 },
55 .owner = THIS_MODULE,
56};
57
58static void
59pvpanic_send_event(unsigned int event)
60{
61 outb(event, port);
62}
63
64static int
65pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
66 void *unused)
67{
68 pvpanic_send_event(PVPANIC_PANICKED);
69 return NOTIFY_DONE;
70}
71
72static struct notifier_block pvpanic_panic_nb = {
73 .notifier_call = pvpanic_panic_notify,
74};
75
76
77static acpi_status
78pvpanic_walk_resources(struct acpi_resource *res, void *context)
79{
80 switch (res->type) {
81 case ACPI_RESOURCE_TYPE_END_TAG:
82 return AE_OK;
83
84 case ACPI_RESOURCE_TYPE_IO:
85 port = res->data.io.minimum;
86 return AE_OK;
87
88 default:
89 return AE_ERROR;
90 }
91}
92
93static int pvpanic_add(struct acpi_device *device)
94{
95 acpi_status status;
96 u64 ret;
97
98 status = acpi_evaluate_integer(device->handle, "_STA", NULL,
99 &ret);
100
101 if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B)
102 return -ENODEV;
103
104 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
105 pvpanic_walk_resources, NULL);
106
107 if (!port)
108 return -ENODEV;
109
110 atomic_notifier_chain_register(&panic_notifier_list,
111 &pvpanic_panic_nb);
112
113 return 0;
114}
115
116static int pvpanic_remove(struct acpi_device *device)
117{
118
119 atomic_notifier_chain_unregister(&panic_notifier_list,
120 &pvpanic_panic_nb);
121 return 0;
122}
123
124module_acpi_driver(pvpanic_driver);
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 5f770059fd4d..1a90b62a71c6 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -176,10 +176,7 @@ static int __init samsungq10_init(void)
176 samsungq10_probe, 176 samsungq10_probe,
177 NULL, 0, NULL, 0); 177 NULL, 0, NULL, 0);
178 178
179 if (IS_ERR(samsungq10_device)) 179 return PTR_RET(samsungq10_device);
180 return PTR_ERR(samsungq10_device);
181
182 return 0;
183} 180}
184 181
185static void __exit samsungq10_exit(void) 182static void __exit samsungq10_exit(void)
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d544e3aaf761..2ac045f27f10 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
1255 real_ev = __sony_nc_gfx_switch_status_get(); 1255 real_ev = __sony_nc_gfx_switch_status_get();
1256 break; 1256 break;
1257 1257
1258 case 0x015B:
1259 /* Hybrid GFX switching SVS151290S */
1260 ev_type = GFX_SWITCH;
1261 real_ev = __sony_nc_gfx_switch_status_get();
1262 break;
1258 default: 1263 default:
1259 dprintk("Unknown event 0x%x for handle 0x%x\n", 1264 dprintk("Unknown event 0x%x for handle 0x%x\n",
1260 event, handle); 1265 event, handle);
@@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
1353 break; 1358 break;
1354 case 0x0128: 1359 case 0x0128:
1355 case 0x0146: 1360 case 0x0146:
1361 case 0x015B:
1356 result = sony_nc_gfx_switch_setup(pf_device, handle); 1362 result = sony_nc_gfx_switch_setup(pf_device, handle);
1357 if (result) 1363 if (result)
1358 pr_err("couldn't set up GFX Switch status (%d)\n", 1364 pr_err("couldn't set up GFX Switch status (%d)\n",
@@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
1375 case 0x0143: 1381 case 0x0143:
1376 case 0x014b: 1382 case 0x014b:
1377 case 0x014c: 1383 case 0x014c:
1384 case 0x0163:
1378 result = sony_nc_kbd_backlight_setup(pf_device, handle); 1385 result = sony_nc_kbd_backlight_setup(pf_device, handle);
1379 if (result) 1386 if (result)
1380 pr_err("couldn't set up keyboard backlight function (%d)\n", 1387 pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
1426 break; 1433 break;
1427 case 0x0128: 1434 case 0x0128:
1428 case 0x0146: 1435 case 0x0146:
1436 case 0x015B:
1429 sony_nc_gfx_switch_cleanup(pd); 1437 sony_nc_gfx_switch_cleanup(pd);
1430 break; 1438 break;
1431 case 0x0131: 1439 case 0x0131:
@@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
1439 case 0x0143: 1447 case 0x0143:
1440 case 0x014b: 1448 case 0x014b:
1441 case 0x014c: 1449 case 0x014c:
1450 case 0x0163:
1442 sony_nc_kbd_backlight_cleanup(pd); 1451 sony_nc_kbd_backlight_cleanup(pd);
1443 break; 1452 break;
1444 default: 1453 default:
@@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void)
1485 case 0x0143: 1494 case 0x0143:
1486 case 0x014b: 1495 case 0x014b:
1487 case 0x014c: 1496 case 0x014c:
1497 case 0x0163:
1488 sony_nc_kbd_backlight_resume(); 1498 sony_nc_kbd_backlight_resume();
1489 break; 1499 break;
1490 default: 1500 default:
@@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void)
2390{ 2400{
2391 unsigned int result; 2401 unsigned int result;
2392 2402
2393 if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result)) 2403 if (sony_call_snc_handle(gfxs_ctl->handle,
2404 gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100,
2405 &result))
2394 return -EIO; 2406 return -EIO;
2395 2407
2396 switch (gfxs_ctl->handle) { 2408 switch (gfxs_ctl->handle) {
@@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void)
2400 */ 2412 */
2401 return result & 0x1 ? SPEED : STAMINA; 2413 return result & 0x1 ? SPEED : STAMINA;
2402 break; 2414 break;
2415 case 0x015B:
2416 /* 0: discrete GFX (speed)
2417 * 1: integrated GFX (stamina)
2418 */
2419 return result & 0x1 ? STAMINA : SPEED;
2420 break;
2403 case 0x0128: 2421 case 0x0128:
2404 /* it's a more elaborated bitmask, for now: 2422 /* it's a more elaborated bitmask, for now:
2405 * 2: integrated GFX (stamina) 2423 * 2: integrated GFX (stamina)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index db95c547c09d..86af29f53bbe 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,6 +1353,8 @@ config SCSI_LPFC
1353 tristate "Emulex LightPulse Fibre Channel Support" 1353 tristate "Emulex LightPulse Fibre Channel Support"
1354 depends on PCI && SCSI 1354 depends on PCI && SCSI
1355 select SCSI_FC_ATTRS 1355 select SCSI_FC_ATTRS
1356 select GENERIC_CSUM
1357 select CRC_T10DIF
1356 help 1358 help
1357 This lpfc driver supports the Emulex LightPulse 1359 This lpfc driver supports the Emulex LightPulse
1358 Family of Fibre Channel PCI host adapters. 1360 Family of Fibre Channel PCI host adapters.
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 64136c56e706..33072388ea16 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev)
84 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; 84 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
85 int ddb = (int) (unsigned long) dev->lldd_dev; 85 int ddb = (int) (unsigned long) dev->lldd_dev;
86 86
87 if (dev->dev_type == SATA_PM_PORT) 87 if (dev->dev_type == SAS_SATA_PM_PORT)
88 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); 88 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
89 else if (dev->tproto) 89 else if (dev->tproto)
90 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); 90 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev)
116 int ddb = (int) (unsigned long) dev->lldd_dev; 116 int ddb = (int) (unsigned long) dev->lldd_dev;
117 u32 qdepth = 0; 117 u32 qdepth = 0;
118 118
119 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { 119 if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
120 if (ata_id_has_ncq(ata_dev->id)) 120 if (ata_id_has_ncq(ata_dev->id))
121 qdepth = ata_id_queue_depth(ata_dev->id); 121 qdepth = ata_id_queue_depth(ata_dev->id);
122 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, 122 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev)
140 int ddb = (int) (unsigned long) dev->lldd_dev; 140 int ddb = (int) (unsigned long) dev->lldd_dev;
141 141
142 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); 142 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
143 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || 143 if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
144 dev->dev_type == SATA_PM_PORT) { 144 dev->dev_type == SAS_SATA_PM_PORT) {
145 struct dev_to_host_fis *fis = (struct dev_to_host_fis *) 145 struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
146 dev->frame_rcvd; 146 dev->frame_rcvd;
147 asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); 147 asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
174 asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); 174 asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
175 if (dev->port->oob_mode != SATA_OOB_MODE) { 175 if (dev->port->oob_mode != SATA_OOB_MODE) {
176 flags |= OPEN_REQUIRED; 176 flags |= OPEN_REQUIRED;
177 if ((dev->dev_type == SATA_DEV) || 177 if ((dev->dev_type == SAS_SATA_DEV) ||
178 (dev->tproto & SAS_PROTOCOL_STP)) { 178 (dev->tproto & SAS_PROTOCOL_STP)) {
179 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; 179 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
180 if (rps_resp->frame_type == SMP_RESPONSE && 180 if (rps_resp->frame_type == SMP_RESPONSE &&
@@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev)
188 } else { 188 } else {
189 flags |= CONCURRENT_CONN_SUPP; 189 flags |= CONCURRENT_CONN_SUPP;
190 if (!dev->parent && 190 if (!dev->parent &&
191 (dev->dev_type == EDGE_DEV || 191 (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
192 dev->dev_type == FANOUT_DEV)) 192 dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
193 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 193 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
194 4); 194 4);
195 else 195 else
@@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
198 asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); 198 asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
199 } 199 }
200 } 200 }
201 if (dev->dev_type == SATA_PM) 201 if (dev->dev_type == SAS_SATA_PM)
202 flags |= SATA_MULTIPORT; 202 flags |= SATA_MULTIPORT;
203 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); 203 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
204 204
@@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
211 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); 211 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
212 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); 212 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
213 213
214 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { 214 if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
215 i = asd_init_sata(dev); 215 i = asd_init_sata(dev);
216 if (i < 0) { 216 if (i < 0) {
217 asd_free_ddb(asd_ha, ddb); 217 asd_free_ddb(asd_ha, ddb);
@@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
219 } 219 }
220 } 220 }
221 221
222 if (dev->dev_type == SAS_END_DEV) { 222 if (dev->dev_type == SAS_END_DEVICE) {
223 struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); 223 struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
224 if (rdev->I_T_nexus_loss_timeout > 0) 224 if (rdev->I_T_nexus_loss_timeout > 0)
225 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, 225 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev)
328 328
329 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); 329 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
330 switch (dev->dev_type) { 330 switch (dev->dev_type) {
331 case SATA_PM: 331 case SAS_SATA_PM:
332 res = asd_init_sata_pm_ddb(dev); 332 res = asd_init_sata_pm_ddb(dev);
333 break; 333 break;
334 case SATA_PM_PORT: 334 case SAS_SATA_PM_PORT:
335 res = asd_init_sata_pm_port_ddb(dev); 335 res = asd_init_sata_pm_port_ddb(dev);
336 break; 336 break;
337 default: 337 default:
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 81b736c76fff..4df867e07b20 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy)
74 74
75 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); 75 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
76 76
77 phy->identify_frame->dev_type = SAS_END_DEV; 77 phy->identify_frame->dev_type = SAS_END_DEVICE;
78 if (phy->sas_phy.role & PHY_ROLE_INITIATOR) 78 if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
79 phy->identify_frame->initiator_bits = phy->sas_phy.iproto; 79 phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
80 if (phy->sas_phy.role & PHY_ROLE_TARGET) 80 if (phy->sas_phy.role & PHY_ROLE_TARGET)
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index cf9040933da6..d4c35df3d4ae 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
184 struct sas_phy *phy = sas_get_local_phy(dev); 184 struct sas_phy *phy = sas_get_local_phy(dev);
185 /* Standard mandates link reset for ATA (type 0) and 185 /* Standard mandates link reset for ATA (type 0) and
186 * hard reset for SSP (type 1) */ 186 * hard reset for SSP (type 1) */
187 int reset_type = (dev->dev_type == SATA_DEV || 187 int reset_type = (dev->dev_type == SAS_SATA_DEV ||
188 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 188 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
189 189
190 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); 190 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index f1733dfa3ae2..777e7c0bbb4b 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 5c87768c109c..e66aa7c11a8a 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
155 uint16_t status = 0, addl_status = 0, wrb_num = 0; 155 uint16_t status = 0, addl_status = 0, wrb_num = 0;
156 struct be_mcc_wrb *temp_wrb; 156 struct be_mcc_wrb *temp_wrb;
157 struct be_cmd_req_hdr *ioctl_hdr; 157 struct be_cmd_req_hdr *ioctl_hdr;
158 struct be_cmd_resp_hdr *ioctl_resp_hdr;
158 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 159 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
159 160
160 if (beiscsi_error(phba)) 161 if (beiscsi_error(phba))
@@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
204 ioctl_hdr->subsystem, 205 ioctl_hdr->subsystem,
205 ioctl_hdr->opcode, 206 ioctl_hdr->opcode,
206 status, addl_status); 207 status, addl_status);
208
209 if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
210 ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
211 if (ioctl_resp_hdr->response_length)
212 goto release_mcc_tag;
213 }
207 rc = -EAGAIN; 214 rc = -EAGAIN;
208 } 215 }
209 216
@@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
267 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 274 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
268 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 275 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
269 struct be_cmd_req_hdr *hdr = embedded_payload(wrb); 276 struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
277 struct be_cmd_resp_hdr *resp_hdr;
270 278
271 be_dws_le_to_cpu(compl, 4); 279 be_dws_le_to_cpu(compl, 4);
272 280
@@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
284 hdr->subsystem, hdr->opcode, 292 hdr->subsystem, hdr->opcode,
285 compl_status, extd_status); 293 compl_status, extd_status);
286 294
295 if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
296 resp_hdr = (struct be_cmd_resp_hdr *) hdr;
297 if (resp_hdr->response_length)
298 return 0;
299 }
287 return -EBUSY; 300 return -EBUSY;
288 } 301 }
289 return 0; 302 return 0;
@@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
335void beiscsi_async_link_state_process(struct beiscsi_hba *phba, 348void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
336 struct be_async_event_link_state *evt) 349 struct be_async_event_link_state *evt)
337{ 350{
338 switch (evt->port_link_status) { 351 if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
339 case ASYNC_EVENT_LINK_DOWN: 352 ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
353 (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
354 phba->state = BE_ADAPTER_LINK_DOWN;
355
340 beiscsi_log(phba, KERN_ERR, 356 beiscsi_log(phba, KERN_ERR,
341 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, 357 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
342 "BC_%d : Link Down on Physical Port %d\n", 358 "BC_%d : Link Down on Port %d\n",
343 evt->physical_port); 359 evt->physical_port);
344 360
345 phba->state |= BE_ADAPTER_LINK_DOWN;
346 iscsi_host_for_each_session(phba->shost, 361 iscsi_host_for_each_session(phba->shost,
347 be2iscsi_fail_session); 362 be2iscsi_fail_session);
348 break; 363 } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
349 case ASYNC_EVENT_LINK_UP: 364 ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
365 (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
350 phba->state = BE_ADAPTER_UP; 366 phba->state = BE_ADAPTER_UP;
367
351 beiscsi_log(phba, KERN_ERR, 368 beiscsi_log(phba, KERN_ERR,
352 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, 369 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
353 "BC_%d : Link UP on Physical Port %d\n", 370 "BC_%d : Link UP on Port %d\n",
354 evt->physical_port);
355 break;
356 default:
357 beiscsi_log(phba, KERN_ERR,
358 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
359 "BC_%d : Unexpected Async Notification %d on"
360 "Physical Port %d\n",
361 evt->port_link_status,
362 evt->physical_port); 371 evt->physical_port);
363 } 372 }
364} 373}
@@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
479{ 488{
480 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; 489 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
481 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 490 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
482 int wait = 0; 491 uint32_t wait = 0;
483 u32 ready; 492 u32 ready;
484 493
485 do { 494 do {
@@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
527 struct be_mcc_compl *compl = &mbox->compl; 536 struct be_mcc_compl *compl = &mbox->compl;
528 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 537 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
529 538
539 status = be_mbox_db_ready_wait(ctrl);
540 if (status)
541 return status;
542
530 val &= ~MPU_MAILBOX_DB_RDY_MASK; 543 val &= ~MPU_MAILBOX_DB_RDY_MASK;
531 val |= MPU_MAILBOX_DB_HI_MASK; 544 val |= MPU_MAILBOX_DB_HI_MASK;
532 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 545 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
580 struct be_mcc_compl *compl = &mbox->compl; 593 struct be_mcc_compl *compl = &mbox->compl;
581 struct be_ctrl_info *ctrl = &phba->ctrl; 594 struct be_ctrl_info *ctrl = &phba->ctrl;
582 595
596 status = be_mbox_db_ready_wait(ctrl);
597 if (status)
598 return status;
599
583 val |= MPU_MAILBOX_DB_HI_MASK; 600 val |= MPU_MAILBOX_DB_HI_MASK;
584 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 601 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
585 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 602 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
732 return status; 749 return status;
733} 750}
734 751
752/**
753 * be_cmd_fw_initialize()- Initialize FW
754 * @ctrl: Pointer to function control structure
755 *
756 * Send FW initialize pattern for the function.
757 *
758 * return
759 * Success: 0
760 * Failure: Non-Zero value
761 **/
735int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) 762int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
736{ 763{
737 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 764 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
762 return status; 789 return status;
763} 790}
764 791
792/**
793 * be_cmd_fw_uninit()- Uinitialize FW
794 * @ctrl: Pointer to function control structure
795 *
796 * Send FW uninitialize pattern for the function
797 *
798 * return
799 * Success: 0
800 * Failure: Non-Zero value
801 **/
802int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
803{
804 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
805 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
806 int status;
807 u8 *endian_check;
808
809 spin_lock(&ctrl->mbox_lock);
810 memset(wrb, 0, sizeof(*wrb));
811
812 endian_check = (u8 *) wrb;
813 *endian_check++ = 0xFF;
814 *endian_check++ = 0xAA;
815 *endian_check++ = 0xBB;
816 *endian_check++ = 0xFF;
817 *endian_check++ = 0xFF;
818 *endian_check++ = 0xCC;
819 *endian_check++ = 0xDD;
820 *endian_check = 0xFF;
821
822 be_dws_cpu_to_le(wrb, sizeof(*wrb));
823
824 status = be_mbox_notify(ctrl);
825 if (status)
826 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
827 "BC_%d : be_cmd_fw_uninit Failed\n");
828
829 spin_unlock(&ctrl->mbox_lock);
830 return status;
831}
832
765int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, 833int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
766 struct be_queue_info *cq, struct be_queue_info *eq, 834 struct be_queue_info *cq, struct be_queue_info *eq,
767 bool sol_evts, bool no_delay, int coalesce_wm) 835 bool sol_evts, bool no_delay, int coalesce_wm)
@@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
783 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); 851 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
784 852
785 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 853 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
786 if (chip_skh_r(ctrl->pdev)) { 854 if (is_chip_be2_be3r(phba)) {
787 req->hdr.version = MBX_CMD_VER2;
788 req->page_size = 1;
789 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
790 ctxt, coalesce_wm);
791 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
792 ctxt, no_delay);
793 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
794 __ilog2_u32(cq->len / 256));
795 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
796 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
797 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
798 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
799 } else {
800 AMAP_SET_BITS(struct amap_cq_context, coalescwm, 855 AMAP_SET_BITS(struct amap_cq_context, coalescwm,
801 ctxt, coalesce_wm); 856 ctxt, coalesce_wm);
802 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); 857 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
809 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); 864 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
810 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, 865 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
811 PCI_FUNC(ctrl->pdev->devfn)); 866 PCI_FUNC(ctrl->pdev->devfn));
867 } else {
868 req->hdr.version = MBX_CMD_VER2;
869 req->page_size = 1;
870 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
871 ctxt, coalesce_wm);
872 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
873 ctxt, no_delay);
874 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
875 __ilog2_u32(cq->len / 256));
876 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
877 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
878 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
879 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
812 } 880 }
813 881
814 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 882 be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
949 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 1017 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
950 struct be_defq_create_req *req = embedded_payload(wrb); 1018 struct be_defq_create_req *req = embedded_payload(wrb);
951 struct be_dma_mem *q_mem = &dq->dma_mem; 1019 struct be_dma_mem *q_mem = &dq->dma_mem;
1020 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
952 void *ctxt = &req->context; 1021 void *ctxt = &req->context;
953 int status; 1022 int status;
954 1023
@@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
961 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); 1030 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
962 1031
963 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1032 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
964 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0); 1033
965 AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt, 1034 if (is_chip_be2_be3r(phba)) {
966 1); 1035 AMAP_SET_BITS(struct amap_be_default_pdu_context,
967 AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt, 1036 rx_pdid, ctxt, 0);
968 PCI_FUNC(ctrl->pdev->devfn)); 1037 AMAP_SET_BITS(struct amap_be_default_pdu_context,
969 AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt, 1038 rx_pdid_valid, ctxt, 1);
970 be_encoded_q_len(length / sizeof(struct phys_addr))); 1039 AMAP_SET_BITS(struct amap_be_default_pdu_context,
971 AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size, 1040 pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
972 ctxt, entry_size); 1041 AMAP_SET_BITS(struct amap_be_default_pdu_context,
973 AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt, 1042 ring_size, ctxt,
974 cq->id); 1043 be_encoded_q_len(length /
1044 sizeof(struct phys_addr)));
1045 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1046 default_buffer_size, ctxt, entry_size);
1047 AMAP_SET_BITS(struct amap_be_default_pdu_context,
1048 cq_id_recv, ctxt, cq->id);
1049 } else {
1050 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1051 rx_pdid, ctxt, 0);
1052 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1053 rx_pdid_valid, ctxt, 1);
1054 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1055 ring_size, ctxt,
1056 be_encoded_q_len(length /
1057 sizeof(struct phys_addr)));
1058 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1059 default_buffer_size, ctxt, entry_size);
1060 AMAP_SET_BITS(struct amap_default_pdu_context_ext,
1061 cq_id_recv, ctxt, cq->id);
1062 }
975 1063
976 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1064 be_dws_cpu_to_le(ctxt, sizeof(req->context));
977 1065
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 23397d51ac54..99073086dfe0 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -52,6 +52,10 @@ struct be_mcc_wrb {
52 52
53/* Completion Status */ 53/* Completion Status */
54#define MCC_STATUS_SUCCESS 0x0 54#define MCC_STATUS_SUCCESS 0x0
55#define MCC_STATUS_FAILED 0x1
56#define MCC_STATUS_ILLEGAL_REQUEST 0x2
57#define MCC_STATUS_ILLEGAL_FIELD 0x3
58#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
55 59
56#define CQE_STATUS_COMPL_MASK 0xFFFF 60#define CQE_STATUS_COMPL_MASK 0xFFFF
57#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 61#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
@@ -118,7 +122,8 @@ struct be_async_event_trailer {
118 122
119enum { 123enum {
120 ASYNC_EVENT_LINK_DOWN = 0x0, 124 ASYNC_EVENT_LINK_DOWN = 0x0,
121 ASYNC_EVENT_LINK_UP = 0x1 125 ASYNC_EVENT_LINK_UP = 0x1,
126 ASYNC_EVENT_LOGICAL = 0x2
122}; 127};
123 128
124/** 129/**
@@ -130,6 +135,9 @@ struct be_async_event_link_state {
130 u8 port_link_status; 135 u8 port_link_status;
131 u8 port_duplex; 136 u8 port_duplex;
132 u8 port_speed; 137 u8 port_speed;
138#define BEISCSI_PHY_LINK_FAULT_NONE 0x00
139#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01
140#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02
133 u8 port_fault; 141 u8 port_fault;
134 u8 rsvd0[7]; 142 u8 rsvd0[7];
135 struct be_async_event_trailer trailer; 143 struct be_async_event_trailer trailer;
@@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
697 uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); 705 uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
698/*ISCSI Functuions */ 706/*ISCSI Functuions */
699int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); 707int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
708int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
700 709
701struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); 710struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
702struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); 711struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -751,6 +760,18 @@ struct amap_be_default_pdu_context {
751 u8 rsvd4[32]; /* dword 3 */ 760 u8 rsvd4[32]; /* dword 3 */
752} __packed; 761} __packed;
753 762
763struct amap_default_pdu_context_ext {
764 u8 rsvd0[16]; /* dword 0 */
765 u8 ring_size[4]; /* dword 0 */
766 u8 rsvd1[12]; /* dword 0 */
767 u8 rsvd2[22]; /* dword 1 */
768 u8 rx_pdid[9]; /* dword 1 */
769 u8 rx_pdid_valid; /* dword 1 */
770 u8 default_buffer_size[16]; /* dword 2 */
771 u8 cq_id_recv[16]; /* dword 2 */
772 u8 rsvd3[32]; /* dword 3 */
773} __packed;
774
754struct be_defq_create_req { 775struct be_defq_create_req {
755 struct be_cmd_req_hdr hdr; 776 struct be_cmd_req_hdr hdr;
756 u16 num_pages; 777 u16 num_pages;
@@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 {
896 * stack to notify the 917 * stack to notify the
897 * controller of a posted Work Request Block 918 * controller of a posted Work Request Block
898 */ 919 */
899#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */ 920#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 16 */
900#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */ 921#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */
901 922
902#define DB_DEF_PDU_WRB_INDEX_SHIFT 16 923#define DB_DEF_PDU_WRB_INDEX_SHIFT 16
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 9014690fe841..ef36be003f67 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
161 struct beiscsi_conn *beiscsi_conn, 161 struct beiscsi_conn *beiscsi_conn,
162 unsigned int cid) 162 unsigned int cid)
163{ 163{
164 if (phba->conn_table[cid]) { 164 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
165
166 if (phba->conn_table[cri_index]) {
165 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 167 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
166 "BS_%d : Connection table already occupied. Detected clash\n"); 168 "BS_%d : Connection table already occupied. Detected clash\n");
167 169
@@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
169 } else { 171 } else {
170 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 172 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
171 "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n", 173 "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
172 cid, beiscsi_conn); 174 cri_index, beiscsi_conn);
173 175
174 phba->conn_table[cid] = beiscsi_conn; 176 phba->conn_table[cri_index] = beiscsi_conn;
175 } 177 }
176 return 0; 178 return 0;
177} 179}
@@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
990static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) 992static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
991{ 993{
992 struct beiscsi_hba *phba = beiscsi_ep->phba; 994 struct beiscsi_hba *phba = beiscsi_ep->phba;
995 struct beiscsi_conn *beiscsi_conn;
993 996
994 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 997 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
995 beiscsi_ep->phba = NULL; 998 beiscsi_ep->phba = NULL;
999 phba->ep_array[BE_GET_CRI_FROM_CID
1000 (beiscsi_ep->ep_cid)] = NULL;
1001
1002 /**
1003 * Check if any connection resource allocated by driver
1004 * is to be freed.This case occurs when target redirection
1005 * or connection retry is done.
1006 **/
1007 if (!beiscsi_ep->conn)
1008 return;
1009
1010 beiscsi_conn = beiscsi_ep->conn;
1011 if (beiscsi_conn->login_in_progress) {
1012 beiscsi_free_mgmt_task_handles(beiscsi_conn,
1013 beiscsi_conn->task);
1014 beiscsi_conn->login_in_progress = 0;
1015 }
996} 1016}
997 1017
998/** 1018/**
@@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1009{ 1029{
1010 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 1030 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
1011 struct beiscsi_hba *phba = beiscsi_ep->phba; 1031 struct beiscsi_hba *phba = beiscsi_ep->phba;
1012 struct be_mcc_wrb *wrb;
1013 struct tcp_connect_and_offload_out *ptcpcnct_out; 1032 struct tcp_connect_and_offload_out *ptcpcnct_out;
1014 struct be_dma_mem nonemb_cmd; 1033 struct be_dma_mem nonemb_cmd;
1015 unsigned int tag; 1034 unsigned int tag;
@@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1029 "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", 1048 "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
1030 beiscsi_ep->ep_cid); 1049 beiscsi_ep->ep_cid);
1031 1050
1032 phba->ep_array[beiscsi_ep->ep_cid - 1051 phba->ep_array[BE_GET_CRI_FROM_CID
1033 phba->fw_config.iscsi_cid_start] = ep; 1052 (beiscsi_ep->ep_cid)] = ep;
1034 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
1035 phba->params.cxns_per_ctrl * 2)) {
1036
1037 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1038 "BS_%d : Failed in allocate iscsi cid\n");
1039 goto free_ep;
1040 }
1041 1053
1042 beiscsi_ep->cid_vld = 0; 1054 beiscsi_ep->cid_vld = 0;
1043 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 1055 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1049 "BS_%d : Failed to allocate memory for" 1061 "BS_%d : Failed to allocate memory for"
1050 " mgmt_open_connection\n"); 1062 " mgmt_open_connection\n");
1051 1063
1052 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 1064 beiscsi_free_ep(beiscsi_ep);
1053 return -ENOMEM; 1065 return -ENOMEM;
1054 } 1066 }
1055 nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); 1067 nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
1056 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 1068 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
1057 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); 1069 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
1058 if (!tag) { 1070 if (tag <= 0) {
1059 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1071 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1060 "BS_%d : mgmt_open_connection Failed for cid=%d\n", 1072 "BS_%d : mgmt_open_connection Failed for cid=%d\n",
1061 beiscsi_ep->ep_cid); 1073 beiscsi_ep->ep_cid);
1062 1074
1063 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
1064 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1075 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1065 nonemb_cmd.va, nonemb_cmd.dma); 1076 nonemb_cmd.va, nonemb_cmd.dma);
1077 beiscsi_free_ep(beiscsi_ep);
1066 return -EAGAIN; 1078 return -EAGAIN;
1067 } 1079 }
1068 1080
1069 ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL); 1081 ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
1070 if (ret) { 1082 if (ret) {
1071 beiscsi_log(phba, KERN_ERR, 1083 beiscsi_log(phba, KERN_ERR,
1072 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 1084 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1074 1086
1075 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1087 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1076 nonemb_cmd.va, nonemb_cmd.dma); 1088 nonemb_cmd.va, nonemb_cmd.dma);
1077 goto free_ep; 1089 beiscsi_free_ep(beiscsi_ep);
1090 return -EBUSY;
1078 } 1091 }
1079 1092
1080 ptcpcnct_out = embedded_payload(wrb); 1093 ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
1081 beiscsi_ep = ep->dd_data; 1094 beiscsi_ep = ep->dd_data;
1082 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; 1095 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
1083 beiscsi_ep->cid_vld = 1; 1096 beiscsi_ep->cid_vld = 1;
@@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1087 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1100 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
1088 nonemb_cmd.va, nonemb_cmd.dma); 1101 nonemb_cmd.va, nonemb_cmd.dma);
1089 return 0; 1102 return 0;
1090
1091free_ep:
1092 beiscsi_free_ep(beiscsi_ep);
1093 return -EBUSY;
1094} 1103}
1095 1104
1096/** 1105/**
@@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1119 return ERR_PTR(ret); 1128 return ERR_PTR(ret);
1120 } 1129 }
1121 1130
1131 if (beiscsi_error(phba)) {
1132 ret = -EIO;
1133 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1134 "BS_%d : The FW state Not Stable!!!\n");
1135 return ERR_PTR(ret);
1136 }
1137
1122 if (phba->state != BE_ADAPTER_UP) { 1138 if (phba->state != BE_ADAPTER_UP) {
1123 ret = -EBUSY; 1139 ret = -EBUSY;
1124 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1140 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
1201static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, 1217static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
1202 unsigned int cid) 1218 unsigned int cid)
1203{ 1219{
1204 if (phba->conn_table[cid]) 1220 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1205 phba->conn_table[cid] = NULL; 1221
1222 if (phba->conn_table[cri_index])
1223 phba->conn_table[cri_index] = NULL;
1206 else { 1224 else {
1207 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1225 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1208 "BS_%d : Connection table Not occupied.\n"); 1226 "BS_%d : Connection table Not occupied.\n");
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 38eab7232159..31ddc8494398 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 4e2733d23003..d24a2867bc21 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
153 153
154DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 154DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
155DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 155DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
156DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
157DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
156struct device_attribute *beiscsi_attrs[] = { 158struct device_attribute *beiscsi_attrs[] = {
157 &dev_attr_beiscsi_log_enable, 159 &dev_attr_beiscsi_log_enable,
158 &dev_attr_beiscsi_drvr_ver, 160 &dev_attr_beiscsi_drvr_ver,
159 &dev_attr_beiscsi_adapter_family, 161 &dev_attr_beiscsi_adapter_family,
162 &dev_attr_beiscsi_fw_ver,
163 &dev_attr_beiscsi_active_cid_count,
160 NULL, 164 NULL,
161}; 165};
162 166
@@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
702 + BE2_TMFS 706 + BE2_TMFS
703 + BE2_NOPOUT_REQ)); 707 + BE2_NOPOUT_REQ));
704 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; 708 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
705 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; 709 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
706 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; 710 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
707 phba->params.num_sge_per_io = BE2_SGE; 711 phba->params.num_sge_per_io = BE2_SGE;
708 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 712 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
1032static unsigned int 1036static unsigned int
1033beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 1037beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1034 struct beiscsi_hba *phba, 1038 struct beiscsi_hba *phba,
1035 unsigned short cid,
1036 struct pdu_base *ppdu, 1039 struct pdu_base *ppdu,
1037 unsigned long pdu_len, 1040 unsigned long pdu_len,
1038 void *pbuffer, unsigned long buf_len) 1041 void *pbuffer, unsigned long buf_len)
@@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
1144 struct hwi_wrb_context *pwrb_context; 1147 struct hwi_wrb_context *pwrb_context;
1145 struct hwi_controller *phwi_ctrlr; 1148 struct hwi_controller *phwi_ctrlr;
1146 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; 1149 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
1150 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1147 1151
1148 phwi_ctrlr = phba->phwi_ctrlr; 1152 phwi_ctrlr = phba->phwi_ctrlr;
1149 pwrb_context = &phwi_ctrlr->wrb_context[cid]; 1153 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1150 if (pwrb_context->wrb_handles_available >= 2) { 1154 if (pwrb_context->wrb_handles_available >= 2) {
1151 pwrb_handle = pwrb_context->pwrb_handle_base[ 1155 pwrb_handle = pwrb_context->pwrb_handle_base[
1152 pwrb_context->alloc_index]; 1156 pwrb_context->alloc_index];
@@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1322 hdr->t2retain = 0; 1326 hdr->t2retain = 0;
1323 hdr->flags = csol_cqe->i_flags; 1327 hdr->flags = csol_cqe->i_flags;
1324 hdr->response = csol_cqe->i_resp; 1328 hdr->response = csol_cqe->i_resp;
1325 hdr->exp_cmdsn = csol_cqe->exp_cmdsn; 1329 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1326 hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); 1330 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1331 csol_cqe->cmd_wnd - 1);
1327 1332
1328 hdr->dlength[0] = 0; 1333 hdr->dlength[0] = 0;
1329 hdr->dlength[1] = 0; 1334 hdr->dlength[1] = 0;
@@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1346 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1351 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1347 hdr->flags = csol_cqe->i_flags; 1352 hdr->flags = csol_cqe->i_flags;
1348 hdr->response = csol_cqe->i_resp; 1353 hdr->response = csol_cqe->i_resp;
1349 hdr->exp_cmdsn = csol_cqe->exp_cmdsn; 1354 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1350 hdr->max_cmdsn = (csol_cqe->exp_cmdsn + 1355 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1351 csol_cqe->cmd_wnd - 1); 1356 csol_cqe->cmd_wnd - 1);
1352 1357
1353 hdr->itt = io_task->libiscsi_itt; 1358 hdr->itt = io_task->libiscsi_itt;
1354 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1359 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1363 struct hwi_controller *phwi_ctrlr; 1368 struct hwi_controller *phwi_ctrlr;
1364 struct iscsi_task *task; 1369 struct iscsi_task *task;
1365 struct beiscsi_io_task *io_task; 1370 struct beiscsi_io_task *io_task;
1366 struct iscsi_conn *conn = beiscsi_conn->conn; 1371 uint16_t wrb_index, cid, cri_index;
1367 struct iscsi_session *session = conn->session;
1368 uint16_t wrb_index, cid;
1369 1372
1370 phwi_ctrlr = phba->phwi_ctrlr; 1373 phwi_ctrlr = phba->phwi_ctrlr;
1371 if (chip_skh_r(phba->pcidev)) { 1374 if (is_chip_be2_be3r(phba)) {
1372 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1375 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1373 wrb_idx, psol); 1376 wrb_idx, psol);
1374 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1377 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1375 cid, psol); 1378 cid, psol);
1376 } else { 1379 } else {
1377 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1380 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1378 wrb_idx, psol); 1381 wrb_idx, psol);
1379 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1382 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1380 cid, psol); 1383 cid, psol);
1381 } 1384 }
1382 1385
1383 pwrb_context = &phwi_ctrlr->wrb_context[ 1386 cri_index = BE_GET_CRI_FROM_CID(cid);
1384 cid - phba->fw_config.iscsi_cid_start]; 1387 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1385 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1388 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1386 task = pwrb_handle->pio_handle; 1389 task = pwrb_handle->pio_handle;
1387 1390
1388 io_task = task->dd_data; 1391 io_task = task->dd_data;
1389 spin_lock_bh(&phba->mgmt_sgl_lock); 1392 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
1390 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 1393 iscsi_put_task(task);
1391 spin_unlock_bh(&phba->mgmt_sgl_lock);
1392 spin_lock_bh(&session->lock);
1393 free_wrb_handle(phba, pwrb_context, pwrb_handle);
1394 spin_unlock_bh(&session->lock);
1395} 1394}
1396 1395
1397static void 1396static void
@@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1406 hdr = (struct iscsi_nopin *)task->hdr; 1405 hdr = (struct iscsi_nopin *)task->hdr;
1407 hdr->flags = csol_cqe->i_flags; 1406 hdr->flags = csol_cqe->i_flags;
1408 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1407 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1409 hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn + 1408 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1410 csol_cqe->cmd_wnd - 1); 1409 csol_cqe->cmd_wnd - 1);
1411 1410
1412 hdr->opcode = ISCSI_OP_NOOP_IN; 1411 hdr->opcode = ISCSI_OP_NOOP_IN;
1413 hdr->itt = io_task->libiscsi_itt; 1412 hdr->itt = io_task->libiscsi_itt;
@@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1418 struct sol_cqe *psol, 1417 struct sol_cqe *psol,
1419 struct common_sol_cqe *csol_cqe) 1418 struct common_sol_cqe *csol_cqe)
1420{ 1419{
1421 if (chip_skh_r(phba->pcidev)) { 1420 if (is_chip_be2_be3r(phba)) {
1421 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1422 i_exp_cmd_sn, psol);
1423 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1424 i_res_cnt, psol);
1425 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1426 i_cmd_wnd, psol);
1427 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1428 wrb_index, psol);
1429 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1430 cid, psol);
1431 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1432 hw_sts, psol);
1433 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1434 i_resp, psol);
1435 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1436 i_sts, psol);
1437 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1438 i_flags, psol);
1439 } else {
1422 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1440 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1423 i_exp_cmd_sn, psol); 1441 i_exp_cmd_sn, psol);
1424 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1442 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
@@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1429 cid, psol); 1447 cid, psol);
1430 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1448 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1431 hw_sts, psol); 1449 hw_sts, psol);
1432 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1450 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1433 i_cmd_wnd, psol); 1451 i_cmd_wnd, psol);
1434 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1452 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1435 cmd_cmpl, psol)) 1453 cmd_cmpl, psol))
@@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1445 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1463 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1446 o, psol)) 1464 o, psol))
1447 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1465 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
1448 } else {
1449 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1450 i_exp_cmd_sn, psol);
1451 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1452 i_res_cnt, psol);
1453 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1454 i_cmd_wnd, psol);
1455 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1456 wrb_index, psol);
1457 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1458 cid, psol);
1459 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1460 hw_sts, psol);
1461 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1462 i_resp, psol);
1463 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1464 i_sts, psol);
1465 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1466 i_flags, psol);
1467 } 1466 }
1468} 1467}
1469 1468
@@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1480 struct iscsi_conn *conn = beiscsi_conn->conn; 1479 struct iscsi_conn *conn = beiscsi_conn->conn;
1481 struct iscsi_session *session = conn->session; 1480 struct iscsi_session *session = conn->session;
1482 struct common_sol_cqe csol_cqe = {0}; 1481 struct common_sol_cqe csol_cqe = {0};
1482 uint16_t cri_index = 0;
1483 1483
1484 phwi_ctrlr = phba->phwi_ctrlr; 1484 phwi_ctrlr = phba->phwi_ctrlr;
1485 1485
1486 /* Copy the elements to a common structure */ 1486 /* Copy the elements to a common structure */
1487 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1487 adapter_get_sol_cqe(phba, psol, &csol_cqe);
1488 1488
1489 pwrb_context = &phwi_ctrlr->wrb_context[ 1489 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
1490 csol_cqe.cid - phba->fw_config.iscsi_cid_start]; 1490 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1491 1491
1492 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1492 pwrb_handle = pwrb_context->pwrb_handle_basestd[
1493 csol_cqe.wrb_index]; 1493 csol_cqe.wrb_index];
@@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1561 unsigned char is_header = 0; 1561 unsigned char is_header = 0;
1562 unsigned int index, dpl; 1562 unsigned int index, dpl;
1563 1563
1564 if (chip_skh_r(phba->pcidev)) { 1564 if (is_chip_be2_be3r(phba)) {
1565 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1565 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1566 dpl, pdpdu_cqe); 1566 dpl, pdpdu_cqe);
1567 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1567 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1568 index, pdpdu_cqe); 1568 index, pdpdu_cqe);
1569 } else { 1569 } else {
1570 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1570 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1571 dpl, pdpdu_cqe); 1571 dpl, pdpdu_cqe);
1572 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1572 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1573 index, pdpdu_cqe); 1573 index, pdpdu_cqe);
1574 } 1574 }
1575 1575
@@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1613 1613
1614 WARN_ON(!pasync_handle); 1614 WARN_ON(!pasync_handle);
1615 1615
1616 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - 1616 pasync_handle->cri =
1617 phba->fw_config.iscsi_cid_start; 1617 BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
1618 pasync_handle->is_header = is_header; 1618 pasync_handle->is_header = is_header;
1619 pasync_handle->buffer_len = dpl; 1619 pasync_handle->buffer_len = dpl;
1620 *pcq_index = index; 1620 *pcq_index = index;
@@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1856 } 1856 }
1857 1857
1858 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1858 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1859 (beiscsi_conn->beiscsi_conn_cid -
1860 phba->fw_config.iscsi_cid_start),
1861 phdr, hdr_len, pfirst_buffer, 1859 phdr, hdr_len, pfirst_buffer,
1862 offset); 1860 offset);
1863 1861
@@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2011 unsigned int num_processed = 0; 2009 unsigned int num_processed = 0;
2012 unsigned int tot_nump = 0; 2010 unsigned int tot_nump = 0;
2013 unsigned short code = 0, cid = 0; 2011 unsigned short code = 0, cid = 0;
2012 uint16_t cri_index = 0;
2014 struct beiscsi_conn *beiscsi_conn; 2013 struct beiscsi_conn *beiscsi_conn;
2015 struct beiscsi_endpoint *beiscsi_ep; 2014 struct beiscsi_endpoint *beiscsi_ep;
2016 struct iscsi_endpoint *ep; 2015 struct iscsi_endpoint *ep;
@@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2028 32] & CQE_CODE_MASK); 2027 32] & CQE_CODE_MASK);
2029 2028
2030 /* Get the CID */ 2029 /* Get the CID */
2031 if (chip_skh_r(phba->pcidev)) { 2030 if (is_chip_be2_be3r(phba)) {
2031 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2032 } else {
2032 if ((code == DRIVERMSG_NOTIFY) || 2033 if ((code == DRIVERMSG_NOTIFY) ||
2033 (code == UNSOL_HDR_NOTIFY) || 2034 (code == UNSOL_HDR_NOTIFY) ||
2034 (code == UNSOL_DATA_NOTIFY)) 2035 (code == UNSOL_DATA_NOTIFY))
@@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2038 else 2039 else
2039 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 2040 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
2040 cid, sol); 2041 cid, sol);
2041 } else 2042 }
2042 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2043 2043
2044 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; 2044 cri_index = BE_GET_CRI_FROM_CID(cid);
2045 ep = phba->ep_array[cri_index];
2045 beiscsi_ep = ep->dd_data; 2046 beiscsi_ep = ep->dd_data;
2046 beiscsi_conn = beiscsi_ep->conn; 2047 beiscsi_conn = beiscsi_ep->conn;
2047 2048
@@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
2191 2192
2192static int be_iopoll(struct blk_iopoll *iop, int budget) 2193static int be_iopoll(struct blk_iopoll *iop, int budget)
2193{ 2194{
2194 static unsigned int ret; 2195 unsigned int ret;
2195 struct beiscsi_hba *phba; 2196 struct beiscsi_hba *phba;
2196 struct be_eq_obj *pbe_eq; 2197 struct be_eq_obj *pbe_eq;
2197 2198
@@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2416 /* Check for the data_count */ 2417 /* Check for the data_count */
2417 dsp_value = (task->data_count) ? 1 : 0; 2418 dsp_value = (task->data_count) ? 1 : 0;
2418 2419
2419 if (chip_skh_r(phba->pcidev)) 2420 if (is_chip_be2_be3r(phba))
2420 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2421 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2421 pwrb, dsp_value); 2422 pwrb, dsp_value);
2422 else 2423 else
2423 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2424 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2424 pwrb, dsp_value); 2425 pwrb, dsp_value);
2425 2426
2426 /* Map addr only if there is data_count */ 2427 /* Map addr only if there is data_count */
@@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2538 2539
2539static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2540static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2540{ 2541{
2541 struct be_mem_descriptor *mem_descr;
2542 dma_addr_t bus_add; 2542 dma_addr_t bus_add;
2543 struct hwi_controller *phwi_ctrlr;
2544 struct be_mem_descriptor *mem_descr;
2543 struct mem_array *mem_arr, *mem_arr_orig; 2545 struct mem_array *mem_arr, *mem_arr_orig;
2544 unsigned int i, j, alloc_size, curr_alloc_size; 2546 unsigned int i, j, alloc_size, curr_alloc_size;
2545 2547
@@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2547 if (!phba->phwi_ctrlr) 2549 if (!phba->phwi_ctrlr)
2548 return -ENOMEM; 2550 return -ENOMEM;
2549 2551
2552 /* Allocate memory for wrb_context */
2553 phwi_ctrlr = phba->phwi_ctrlr;
2554 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2555 phba->params.cxns_per_ctrl,
2556 GFP_KERNEL);
2557 if (!phwi_ctrlr->wrb_context)
2558 return -ENOMEM;
2559
2550 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2560 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2551 GFP_KERNEL); 2561 GFP_KERNEL);
2552 if (!phba->init_mem) { 2562 if (!phba->init_mem) {
2563 kfree(phwi_ctrlr->wrb_context);
2553 kfree(phba->phwi_ctrlr); 2564 kfree(phba->phwi_ctrlr);
2554 return -ENOMEM; 2565 return -ENOMEM;
2555 } 2566 }
@@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2558 GFP_KERNEL); 2569 GFP_KERNEL);
2559 if (!mem_arr_orig) { 2570 if (!mem_arr_orig) {
2560 kfree(phba->init_mem); 2571 kfree(phba->init_mem);
2572 kfree(phwi_ctrlr->wrb_context);
2561 kfree(phba->phwi_ctrlr); 2573 kfree(phba->phwi_ctrlr);
2562 return -ENOMEM; 2574 return -ENOMEM;
2563 } 2575 }
@@ -2628,6 +2640,7 @@ free_mem:
2628 } 2640 }
2629 kfree(mem_arr_orig); 2641 kfree(mem_arr_orig);
2630 kfree(phba->init_mem); 2642 kfree(phba->init_mem);
2643 kfree(phba->phwi_ctrlr->wrb_context);
2631 kfree(phba->phwi_ctrlr); 2644 kfree(phba->phwi_ctrlr);
2632 return -ENOMEM; 2645 return -ENOMEM;
2633} 2646}
@@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2666static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2679static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2667{ 2680{
2668 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2681 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2682 struct hwi_context_memory *phwi_ctxt;
2669 struct wrb_handle *pwrb_handle = NULL; 2683 struct wrb_handle *pwrb_handle = NULL;
2670 struct hwi_controller *phwi_ctrlr; 2684 struct hwi_controller *phwi_ctrlr;
2671 struct hwi_wrb_context *pwrb_context; 2685 struct hwi_wrb_context *pwrb_context;
@@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2680 mem_descr_wrb += HWI_MEM_WRB; 2694 mem_descr_wrb += HWI_MEM_WRB;
2681 phwi_ctrlr = phba->phwi_ctrlr; 2695 phwi_ctrlr = phba->phwi_ctrlr;
2682 2696
2683 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2697 /* Allocate memory for WRBQ */
2698 phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2699 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
2700 phba->fw_config.iscsi_cid_count,
2701 GFP_KERNEL);
2702 if (!phwi_ctxt->be_wrbq) {
2703 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2704 "BM_%d : WRBQ Mem Alloc Failed\n");
2705 return -ENOMEM;
2706 }
2707
2708 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2684 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2709 pwrb_context = &phwi_ctrlr->wrb_context[index];
2685 pwrb_context->pwrb_handle_base = 2710 pwrb_context->pwrb_handle_base =
2686 kzalloc(sizeof(struct wrb_handle *) * 2711 kzalloc(sizeof(struct wrb_handle *) *
@@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2723 } 2748 }
2724 } 2749 }
2725 idx = 0; 2750 idx = 0;
2726 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2751 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2727 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2752 pwrb_context = &phwi_ctrlr->wrb_context[index];
2728 if (!num_cxn_wrb) { 2753 if (!num_cxn_wrb) {
2729 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2754 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@@ -2752,7 +2777,7 @@ init_wrb_hndl_failed:
2752 return -ENOMEM; 2777 return -ENOMEM;
2753} 2778}
2754 2779
2755static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2780static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2756{ 2781{
2757 struct hwi_controller *phwi_ctrlr; 2782 struct hwi_controller *phwi_ctrlr;
2758 struct hba_parameters *p = &phba->params; 2783 struct hba_parameters *p = &phba->params;
@@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2770 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; 2795 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2771 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2796 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2772 2797
2798 pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
2799 phba->fw_config.iscsi_cid_count,
2800 GFP_KERNEL);
2801 if (!pasync_ctx->async_entry) {
2802 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2803 "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
2804 return -ENOMEM;
2805 }
2806
2773 pasync_ctx->num_entries = p->asyncpdus_per_ctrl; 2807 pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2774 pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2808 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2775 2809
@@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2934 pasync_ctx->async_header.ep_read_ptr = -1; 2968 pasync_ctx->async_header.ep_read_ptr = -1;
2935 pasync_ctx->async_data.host_write_ptr = 0; 2969 pasync_ctx->async_data.host_write_ptr = 0;
2936 pasync_ctx->async_data.ep_read_ptr = -1; 2970 pasync_ctx->async_data.ep_read_ptr = -1;
2971
2972 return 0;
2937} 2973}
2938 2974
2939static int 2975static int
@@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3293 void *wrb_vaddr; 3329 void *wrb_vaddr;
3294 struct be_dma_mem sgl; 3330 struct be_dma_mem sgl;
3295 struct be_mem_descriptor *mem_descr; 3331 struct be_mem_descriptor *mem_descr;
3332 struct hwi_wrb_context *pwrb_context;
3296 int status; 3333 int status;
3297 3334
3298 idx = 0; 3335 idx = 0;
@@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3351 kfree(pwrb_arr); 3388 kfree(pwrb_arr);
3352 return status; 3389 return status;
3353 } 3390 }
3354 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 3391 pwrb_context = &phwi_ctrlr->wrb_context[i];
3355 id; 3392 pwrb_context->cid = phwi_context->be_wrbq[i].id;
3393 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
3356 } 3394 }
3357 kfree(pwrb_arr); 3395 kfree(pwrb_arr);
3358 return 0; 3396 return 0;
@@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
3365 struct hwi_wrb_context *pwrb_context; 3403 struct hwi_wrb_context *pwrb_context;
3366 3404
3367 phwi_ctrlr = phba->phwi_ctrlr; 3405 phwi_ctrlr = phba->phwi_ctrlr;
3368 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 3406 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3369 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3407 pwrb_context = &phwi_ctrlr->wrb_context[index];
3370 kfree(pwrb_context->pwrb_handle_base); 3408 kfree(pwrb_context->pwrb_handle_base);
3371 kfree(pwrb_context->pwrb_handle_basestd); 3409 kfree(pwrb_context->pwrb_handle_basestd);
@@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3394 struct be_ctrl_info *ctrl = &phba->ctrl; 3432 struct be_ctrl_info *ctrl = &phba->ctrl;
3395 struct hwi_controller *phwi_ctrlr; 3433 struct hwi_controller *phwi_ctrlr;
3396 struct hwi_context_memory *phwi_context; 3434 struct hwi_context_memory *phwi_context;
3435 struct hwi_async_pdu_context *pasync_ctx;
3397 int i, eq_num; 3436 int i, eq_num;
3398 3437
3399 phwi_ctrlr = phba->phwi_ctrlr; 3438 phwi_ctrlr = phba->phwi_ctrlr;
@@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3403 if (q->created) 3442 if (q->created)
3404 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3443 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3405 } 3444 }
3445 kfree(phwi_context->be_wrbq);
3406 free_wrb_handles(phba); 3446 free_wrb_handles(phba);
3407 3447
3408 q = &phwi_context->be_def_hdrq; 3448 q = &phwi_context->be_def_hdrq;
@@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
3430 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3470 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3431 } 3471 }
3432 be_mcc_queues_destroy(phba); 3472 be_mcc_queues_destroy(phba);
3473
3474 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
3475 kfree(pasync_ctx->async_entry);
3476 be_cmd_fw_uninit(ctrl);
3433} 3477}
3434 3478
3435static int be_mcc_queues_create(struct beiscsi_hba *phba, 3479static int be_mcc_queues_create(struct beiscsi_hba *phba,
@@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
3607 if (beiscsi_init_wrb_handle(phba)) 3651 if (beiscsi_init_wrb_handle(phba))
3608 return -ENOMEM; 3652 return -ENOMEM;
3609 3653
3610 hwi_init_async_pdu_ctx(phba); 3654 if (hwi_init_async_pdu_ctx(phba)) {
3655 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3656 "BM_%d : hwi_init_async_pdu_ctx failed\n");
3657 return -ENOMEM;
3658 }
3659
3611 if (hwi_init_port(phba) != 0) { 3660 if (hwi_init_port(phba) != 0) {
3612 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3661 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3613 "BM_%d : hwi_init_controller failed\n"); 3662 "BM_%d : hwi_init_controller failed\n");
@@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
3637 mem_descr++; 3686 mem_descr++;
3638 } 3687 }
3639 kfree(phba->init_mem); 3688 kfree(phba->init_mem);
3689 kfree(phba->phwi_ctrlr->wrb_context);
3640 kfree(phba->phwi_ctrlr); 3690 kfree(phba->phwi_ctrlr);
3641} 3691}
3642 3692
@@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3769 3819
3770static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3820static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3771{ 3821{
3772 int i, new_cid; 3822 int i;
3773 3823
3774 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 3824 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3775 GFP_KERNEL); 3825 GFP_KERNEL);
@@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3780 return -ENOMEM; 3830 return -ENOMEM;
3781 } 3831 }
3782 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 3832 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3783 phba->params.cxns_per_ctrl * 2, GFP_KERNEL); 3833 phba->params.cxns_per_ctrl, GFP_KERNEL);
3784 if (!phba->ep_array) { 3834 if (!phba->ep_array) {
3785 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3786 "BM_%d : Failed to allocate memory in " 3836 "BM_%d : Failed to allocate memory in "
3787 "hba_setup_cid_tbls\n"); 3837 "hba_setup_cid_tbls\n");
3788 kfree(phba->cid_array); 3838 kfree(phba->cid_array);
3839 phba->cid_array = NULL;
3789 return -ENOMEM; 3840 return -ENOMEM;
3790 } 3841 }
3791 new_cid = phba->fw_config.iscsi_cid_start; 3842
3792 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3843 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
3793 phba->cid_array[i] = new_cid; 3844 phba->params.cxns_per_ctrl, GFP_KERNEL);
3794 new_cid += 2; 3845 if (!phba->conn_table) {
3846 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3847 "BM_%d : Failed to allocate memory in"
3848 "hba_setup_cid_tbls\n");
3849
3850 kfree(phba->cid_array);
3851 kfree(phba->ep_array);
3852 phba->cid_array = NULL;
3853 phba->ep_array = NULL;
3854 return -ENOMEM;
3795 } 3855 }
3856
3857 for (i = 0; i < phba->params.cxns_per_ctrl; i++)
3858 phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
3859
3796 phba->avlbl_cids = phba->params.cxns_per_ctrl; 3860 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3797 return 0; 3861 return 0;
3798} 3862}
@@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
4062 kfree(phba->eh_sgl_hndl_base); 4126 kfree(phba->eh_sgl_hndl_base);
4063 kfree(phba->cid_array); 4127 kfree(phba->cid_array);
4064 kfree(phba->ep_array); 4128 kfree(phba->ep_array);
4129 kfree(phba->conn_table);
4130}
4131
4132/**
4133 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4134 * @beiscsi_conn: ptr to the conn to be cleaned up
4135 * @task: ptr to iscsi_task resource to be freed.
4136 *
4137 * Free driver mgmt resources binded to CXN.
4138 **/
4139void
4140beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4141 struct iscsi_task *task)
4142{
4143 struct beiscsi_io_task *io_task;
4144 struct beiscsi_hba *phba = beiscsi_conn->phba;
4145 struct hwi_wrb_context *pwrb_context;
4146 struct hwi_controller *phwi_ctrlr;
4147 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4148 beiscsi_conn->beiscsi_conn_cid);
4149
4150 phwi_ctrlr = phba->phwi_ctrlr;
4151 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4152
4153 io_task = task->dd_data;
4154
4155 if (io_task->pwrb_handle) {
4156 memset(io_task->pwrb_handle->pwrb, 0,
4157 sizeof(struct iscsi_wrb));
4158 free_wrb_handle(phba, pwrb_context,
4159 io_task->pwrb_handle);
4160 io_task->pwrb_handle = NULL;
4161 }
4162
4163 if (io_task->psgl_handle) {
4164 spin_lock_bh(&phba->mgmt_sgl_lock);
4165 free_mgmt_sgl_handle(phba,
4166 io_task->psgl_handle);
4167 io_task->psgl_handle = NULL;
4168 spin_unlock_bh(&phba->mgmt_sgl_lock);
4169 }
4170
4171 if (io_task->mtask_addr)
4172 pci_unmap_single(phba->pcidev,
4173 io_task->mtask_addr,
4174 io_task->mtask_data_count,
4175 PCI_DMA_TODEVICE);
4065} 4176}
4066 4177
4067/** 4178/**
@@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
4078 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4189 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4079 struct hwi_wrb_context *pwrb_context; 4190 struct hwi_wrb_context *pwrb_context;
4080 struct hwi_controller *phwi_ctrlr; 4191 struct hwi_controller *phwi_ctrlr;
4192 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4193 beiscsi_conn->beiscsi_conn_cid);
4081 4194
4082 phwi_ctrlr = phba->phwi_ctrlr; 4195 phwi_ctrlr = phba->phwi_ctrlr;
4083 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid 4196 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4084 - phba->fw_config.iscsi_cid_start];
4085 4197
4086 if (io_task->cmd_bhs) { 4198 if (io_task->cmd_bhs) {
4087 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4199 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
4103 io_task->psgl_handle = NULL; 4215 io_task->psgl_handle = NULL;
4104 } 4216 }
4105 } else { 4217 } else {
4106 if (!beiscsi_conn->login_in_progress) { 4218 if (!beiscsi_conn->login_in_progress)
4107 if (io_task->pwrb_handle) { 4219 beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
4108 free_wrb_handle(phba, pwrb_context,
4109 io_task->pwrb_handle);
4110 io_task->pwrb_handle = NULL;
4111 }
4112 if (io_task->psgl_handle) {
4113 spin_lock(&phba->mgmt_sgl_lock);
4114 free_mgmt_sgl_handle(phba,
4115 io_task->psgl_handle);
4116 spin_unlock(&phba->mgmt_sgl_lock);
4117 io_task->psgl_handle = NULL;
4118 }
4119 if (io_task->mtask_addr) {
4120 pci_unmap_single(phba->pcidev,
4121 io_task->mtask_addr,
4122 io_task->mtask_data_count,
4123 PCI_DMA_TODEVICE);
4124 io_task->mtask_addr = 0;
4125 }
4126 }
4127 } 4220 }
4128} 4221}
4129 4222
@@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4146 beiscsi_cleanup_task(task); 4239 beiscsi_cleanup_task(task);
4147 spin_unlock_bh(&session->lock); 4240 spin_unlock_bh(&session->lock);
4148 4241
4149 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - 4242 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
4150 phba->fw_config.iscsi_cid_start));
4151 4243
4152 /* Check for the adapter family */ 4244 /* Check for the adapter family */
4153 if (chip_skh_r(phba->pcidev)) 4245 if (is_chip_be2_be3r(phba))
4154 beiscsi_offload_cxn_v2(params, pwrb_handle);
4155 else
4156 beiscsi_offload_cxn_v0(params, pwrb_handle, 4246 beiscsi_offload_cxn_v0(params, pwrb_handle,
4157 phba->init_mem); 4247 phba->init_mem);
4248 else
4249 beiscsi_offload_cxn_v2(params, pwrb_handle);
4158 4250
4159 be_dws_le_to_cpu(pwrb_handle->pwrb, 4251 be_dws_le_to_cpu(pwrb_handle->pwrb,
4160 sizeof(struct iscsi_target_context_update_wrb)); 4252 sizeof(struct iscsi_target_context_update_wrb));
@@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4194 struct hwi_wrb_context *pwrb_context; 4286 struct hwi_wrb_context *pwrb_context;
4195 struct hwi_controller *phwi_ctrlr; 4287 struct hwi_controller *phwi_ctrlr;
4196 itt_t itt; 4288 itt_t itt;
4289 uint16_t cri_index = 0;
4197 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4290 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4198 dma_addr_t paddr; 4291 dma_addr_t paddr;
4199 4292
@@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4223 goto free_hndls; 4316 goto free_hndls;
4224 } 4317 }
4225 io_task->pwrb_handle = alloc_wrb_handle(phba, 4318 io_task->pwrb_handle = alloc_wrb_handle(phba,
4226 beiscsi_conn->beiscsi_conn_cid - 4319 beiscsi_conn->beiscsi_conn_cid);
4227 phba->fw_config.iscsi_cid_start);
4228 if (!io_task->pwrb_handle) { 4320 if (!io_task->pwrb_handle) {
4229 beiscsi_log(phba, KERN_ERR, 4321 beiscsi_log(phba, KERN_ERR,
4230 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4322 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4236 } else { 4328 } else {
4237 io_task->scsi_cmnd = NULL; 4329 io_task->scsi_cmnd = NULL;
4238 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4330 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4331 beiscsi_conn->task = task;
4239 if (!beiscsi_conn->login_in_progress) { 4332 if (!beiscsi_conn->login_in_progress) {
4240 spin_lock(&phba->mgmt_sgl_lock); 4333 spin_lock(&phba->mgmt_sgl_lock);
4241 io_task->psgl_handle = (struct sgl_handle *) 4334 io_task->psgl_handle = (struct sgl_handle *)
@@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4257 io_task->psgl_handle; 4350 io_task->psgl_handle;
4258 io_task->pwrb_handle = 4351 io_task->pwrb_handle =
4259 alloc_wrb_handle(phba, 4352 alloc_wrb_handle(phba,
4260 beiscsi_conn->beiscsi_conn_cid - 4353 beiscsi_conn->beiscsi_conn_cid);
4261 phba->fw_config.iscsi_cid_start);
4262 if (!io_task->pwrb_handle) { 4354 if (!io_task->pwrb_handle) {
4263 beiscsi_log(phba, KERN_ERR, 4355 beiscsi_log(phba, KERN_ERR,
4264 BEISCSI_LOG_IO | 4356 BEISCSI_LOG_IO |
@@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4278 io_task->pwrb_handle = 4370 io_task->pwrb_handle =
4279 beiscsi_conn->plogin_wrb_handle; 4371 beiscsi_conn->plogin_wrb_handle;
4280 } 4372 }
4281 beiscsi_conn->task = task;
4282 } else { 4373 } else {
4283 spin_lock(&phba->mgmt_sgl_lock); 4374 spin_lock(&phba->mgmt_sgl_lock);
4284 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4375 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4295 } 4386 }
4296 io_task->pwrb_handle = 4387 io_task->pwrb_handle =
4297 alloc_wrb_handle(phba, 4388 alloc_wrb_handle(phba,
4298 beiscsi_conn->beiscsi_conn_cid - 4389 beiscsi_conn->beiscsi_conn_cid);
4299 phba->fw_config.iscsi_cid_start);
4300 if (!io_task->pwrb_handle) { 4390 if (!io_task->pwrb_handle) {
4301 beiscsi_log(phba, KERN_ERR, 4391 beiscsi_log(phba, KERN_ERR,
4302 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4392 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4324,12 +4414,13 @@ free_io_hndls:
4324free_mgmt_hndls: 4414free_mgmt_hndls:
4325 spin_lock(&phba->mgmt_sgl_lock); 4415 spin_lock(&phba->mgmt_sgl_lock);
4326 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4416 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4417 io_task->psgl_handle = NULL;
4327 spin_unlock(&phba->mgmt_sgl_lock); 4418 spin_unlock(&phba->mgmt_sgl_lock);
4328free_hndls: 4419free_hndls:
4329 phwi_ctrlr = phba->phwi_ctrlr; 4420 phwi_ctrlr = phba->phwi_ctrlr;
4330 pwrb_context = &phwi_ctrlr->wrb_context[ 4421 cri_index = BE_GET_CRI_FROM_CID(
4331 beiscsi_conn->beiscsi_conn_cid - 4422 beiscsi_conn->beiscsi_conn_cid);
4332 phba->fw_config.iscsi_cid_start]; 4423 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4333 if (io_task->pwrb_handle) 4424 if (io_task->pwrb_handle)
4334 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4425 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4335 io_task->pwrb_handle = NULL; 4426 io_task->pwrb_handle = NULL;
@@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4351 unsigned int doorbell = 0; 4442 unsigned int doorbell = 0;
4352 4443
4353 pwrb = io_task->pwrb_handle->pwrb; 4444 pwrb = io_task->pwrb_handle->pwrb;
4354 memset(pwrb, 0, sizeof(*pwrb));
4355 4445
4356 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4446 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4357 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4447 io_task->bhs_len = sizeof(struct be_cmd_bhs);
@@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
4465 pwrb = io_task->pwrb_handle->pwrb; 4555 pwrb = io_task->pwrb_handle->pwrb;
4466 memset(pwrb, 0, sizeof(*pwrb)); 4556 memset(pwrb, 0, sizeof(*pwrb));
4467 4557
4468 if (chip_skh_r(phba->pcidev)) { 4558 if (is_chip_be2_be3r(phba)) {
4469 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4470 be32_to_cpu(task->cmdsn));
4471 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4472 io_task->pwrb_handle->wrb_index);
4473 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4474 io_task->psgl_handle->sgl_index);
4475 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4476 task->data_count);
4477 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4478 io_task->pwrb_handle->nxt_wrb_index);
4479 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4480 } else {
4481 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4559 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4482 be32_to_cpu(task->cmdsn)); 4560 be32_to_cpu(task->cmdsn));
4483 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4561 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task)
4489 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4567 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4490 io_task->pwrb_handle->nxt_wrb_index); 4568 io_task->pwrb_handle->nxt_wrb_index);
4491 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4569 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
4570 } else {
4571 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4572 be32_to_cpu(task->cmdsn));
4573 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4574 io_task->pwrb_handle->wrb_index);
4575 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4576 io_task->psgl_handle->sgl_index);
4577 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4578 task->data_count);
4579 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4580 io_task->pwrb_handle->nxt_wrb_index);
4581 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4492 } 4582 }
4493 4583
4494 4584
@@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
4501 case ISCSI_OP_NOOP_OUT: 4591 case ISCSI_OP_NOOP_OUT:
4502 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4592 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4503 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4593 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4504 if (chip_skh_r(phba->pcidev)) 4594 if (is_chip_be2_be3r(phba))
4505 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4595 AMAP_SET_BITS(struct amap_iscsi_wrb,
4506 dmsg, pwrb, 1); 4596 dmsg, pwrb, 1);
4507 else 4597 else
4508 AMAP_SET_BITS(struct amap_iscsi_wrb, 4598 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4509 dmsg, pwrb, 1); 4599 dmsg, pwrb, 1);
4510 } else { 4600 } else {
4511 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4601 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
4512 if (chip_skh_r(phba->pcidev)) 4602 if (is_chip_be2_be3r(phba))
4513 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4603 AMAP_SET_BITS(struct amap_iscsi_wrb,
4514 dmsg, pwrb, 0); 4604 dmsg, pwrb, 0);
4515 else 4605 else
4516 AMAP_SET_BITS(struct amap_iscsi_wrb, 4606 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4517 dmsg, pwrb, 0); 4607 dmsg, pwrb, 0);
4518 } 4608 }
4519 hwi_write_buffer(pwrb, task); 4609 hwi_write_buffer(pwrb, task);
@@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
4540 } 4630 }
4541 4631
4542 /* Set the task type */ 4632 /* Set the task type */
4543 io_task->wrb_type = (chip_skh_r(phba->pcidev)) ? 4633 io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
4544 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) : 4634 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
4545 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); 4635 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
4546 4636
4547 doorbell |= cid & DB_WRB_POST_CID_MASK; 4637 doorbell |= cid & DB_WRB_POST_CID_MASK;
4548 doorbell |= (io_task->pwrb_handle->wrb_index & 4638 doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
4834 case OC_SKH_ID1: 4924 case OC_SKH_ID1:
4835 phba->generation = BE_GEN4; 4925 phba->generation = BE_GEN4;
4836 phba->iotask_fn = beiscsi_iotask_v2; 4926 phba->iotask_fn = beiscsi_iotask_v2;
4927 break;
4837 default: 4928 default:
4838 phba->generation = 0; 4929 phba->generation = 0;
4839 } 4930 }
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 5946577d79d6..2c06ef3c02ac 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
36 36
37#include "be.h" 37#include "be.h"
38#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
39#define BUILD_STR "10.0.272.0" 39#define BUILD_STR "10.0.467.0"
40#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Emulex OneConnect" \
41 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
42#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
@@ -66,8 +66,9 @@
66 66
67#define MAX_CPUS 64 67#define MAX_CPUS 64
68#define BEISCSI_MAX_NUM_CPUS 7 68#define BEISCSI_MAX_NUM_CPUS 7
69#define OC_SKH_MAX_NUM_CPUS 63 69#define OC_SKH_MAX_NUM_CPUS 31
70 70
71#define BEISCSI_VER_STRLEN 32
71 72
72#define BEISCSI_SGLIST_ELEMENTS 30 73#define BEISCSI_SGLIST_ELEMENTS 30
73 74
@@ -265,7 +266,9 @@ struct invalidate_command_table {
265 unsigned short cid; 266 unsigned short cid;
266} __packed; 267} __packed;
267 268
268#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1) 269#define chip_be2(phba) (phba->generation == BE_GEN2)
270#define chip_be3_r(phba) (phba->generation == BE_GEN3)
271#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
269struct beiscsi_hba { 272struct beiscsi_hba {
270 struct hba_parameters params; 273 struct hba_parameters params;
271 struct hwi_controller *phwi_ctrlr; 274 struct hwi_controller *phwi_ctrlr;
@@ -304,10 +307,15 @@ struct beiscsi_hba {
304 unsigned short avlbl_cids; 307 unsigned short avlbl_cids;
305 unsigned short cid_alloc; 308 unsigned short cid_alloc;
306 unsigned short cid_free; 309 unsigned short cid_free;
307 struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
308 struct list_head hba_queue; 310 struct list_head hba_queue;
311#define BE_MAX_SESSION 2048
312#define BE_SET_CID_TO_CRI(cri_index, cid) \
313 (phba->cid_to_cri_map[cid] = cri_index)
314#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
315 unsigned short cid_to_cri_map[BE_MAX_SESSION];
309 unsigned short *cid_array; 316 unsigned short *cid_array;
310 struct iscsi_endpoint **ep_array; 317 struct iscsi_endpoint **ep_array;
318 struct beiscsi_conn **conn_table;
311 struct iscsi_boot_kset *boot_kset; 319 struct iscsi_boot_kset *boot_kset;
312 struct Scsi_Host *shost; 320 struct Scsi_Host *shost;
313 struct iscsi_iface *ipv4_iface; 321 struct iscsi_iface *ipv4_iface;
@@ -339,6 +347,7 @@ struct beiscsi_hba {
339 struct delayed_work beiscsi_hw_check_task; 347 struct delayed_work beiscsi_hw_check_task;
340 348
341 u8 mac_address[ETH_ALEN]; 349 u8 mac_address[ETH_ALEN];
350 char fw_ver_str[BEISCSI_VER_STRLEN];
342 char wq_name[20]; 351 char wq_name[20];
343 struct workqueue_struct *wq; /* The actuak work queue */ 352 struct workqueue_struct *wq; /* The actuak work queue */
344 struct be_ctrl_info ctrl; 353 struct be_ctrl_info ctrl;
@@ -563,7 +572,7 @@ struct hwi_async_pdu_context {
563 * This is a varying size list! Do not add anything 572 * This is a varying size list! Do not add anything
564 * after this entry!! 573 * after this entry!!
565 */ 574 */
566 struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; 575 struct hwi_async_entry *async_entry;
567}; 576};
568 577
569#define PDUCQE_CODE_MASK 0x0000003F 578#define PDUCQE_CODE_MASK 0x0000003F
@@ -749,6 +758,8 @@ void
749free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); 758free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
750 759
751void beiscsi_process_all_cqs(struct work_struct *work); 760void beiscsi_process_all_cqs(struct work_struct *work);
761void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
762 struct iscsi_task *task);
752 763
753static inline bool beiscsi_error(struct beiscsi_hba *phba) 764static inline bool beiscsi_error(struct beiscsi_hba *phba)
754{ 765{
@@ -933,7 +944,7 @@ struct hwi_controller {
933 struct sgl_handle *psgl_handle_base; 944 struct sgl_handle *psgl_handle_base;
934 unsigned int wrb_mem_index; 945 unsigned int wrb_mem_index;
935 946
936 struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2]; 947 struct hwi_wrb_context *wrb_context;
937 struct mcc_wrb *pmcc_wrb_base; 948 struct mcc_wrb *pmcc_wrb_base;
938 struct be_ring default_pdu_hdr; 949 struct be_ring default_pdu_hdr;
939 struct be_ring default_pdu_data; 950 struct be_ring default_pdu_data;
@@ -970,9 +981,7 @@ struct hwi_context_memory {
970 struct be_queue_info be_def_hdrq; 981 struct be_queue_info be_def_hdrq;
971 struct be_queue_info be_def_dataq; 982 struct be_queue_info be_def_dataq;
972 983
973 struct be_queue_info be_wrbq[BE2_MAX_SESSIONS]; 984 struct be_queue_info *be_wrbq;
974 struct be_mcc_wrb_context *pbe_mcc_context;
975
976 struct hwi_async_pdu_context *pasync_ctx; 985 struct hwi_async_pdu_context *pasync_ctx;
977}; 986};
978 987
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 55cc9902263d..245a9595a93a 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
368 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 368 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
369 "BM_%d : phba->fw_config.iscsi_features = %d\n", 369 "BM_%d : phba->fw_config.iscsi_features = %d\n",
370 phba->fw_config.iscsi_features); 370 phba->fw_config.iscsi_features);
371 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
372 firmware_version_string, BEISCSI_VER_STRLEN);
371 } else 373 } else
372 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 374 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
373 "BG_%d : Failed in mgmt_check_supported_fw\n"); 375 "BG_%d : Failed in mgmt_check_supported_fw\n");
@@ -1260,6 +1262,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
1260} 1262}
1261 1263
1262/** 1264/**
1265 * beiscsi_fw_ver_disp()- Display Firmware Version
1266 * @dev: ptr to device not used.
1267 * @attr: device attribute, not used.
1268 * @buf: contains formatted text Firmware version
1269 *
1270 * return
1271 * size of the formatted string
1272 **/
1273ssize_t
1274beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
1275 char *buf)
1276{
1277 struct Scsi_Host *shost = class_to_shost(dev);
1278 struct beiscsi_hba *phba = iscsi_host_priv(shost);
1279
1280 return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
1281}
1282
1283/**
1284 * beiscsi_active_cid_disp()- Display Sessions Active
1285 * @dev: ptr to device not used.
1286 * @attr: device attribute, not used.
1287 * @buf: contains formatted text Session Count
1288 *
1289 * return
1290 * size of the formatted string
1291 **/
1292ssize_t
1293beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
1294 char *buf)
1295{
1296 struct Scsi_Host *shost = class_to_shost(dev);
1297 struct beiscsi_hba *phba = iscsi_host_priv(shost);
1298
1299 return snprintf(buf, PAGE_SIZE, "%d\n",
1300 (phba->params.cxns_per_ctrl - phba->avlbl_cids));
1301}
1302
1303/**
1263 * beiscsi_adap_family_disp()- Display adapter family. 1304 * beiscsi_adap_family_disp()- Display adapter family.
1264 * @dev: ptr to device to get priv structure 1305 * @dev: ptr to device to get priv structure
1265 * @attr: device attribute, not used. 1306 * @attr: device attribute, not used.
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 2e4968add799..04af7e74fe48 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2012 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -156,25 +156,25 @@ union invalidate_commands_params {
156} __packed; 156} __packed;
157 157
158struct mgmt_hba_attributes { 158struct mgmt_hba_attributes {
159 u8 flashrom_version_string[32]; 159 u8 flashrom_version_string[BEISCSI_VER_STRLEN];
160 u8 manufacturer_name[32]; 160 u8 manufacturer_name[BEISCSI_VER_STRLEN];
161 u32 supported_modes; 161 u32 supported_modes;
162 u8 seeprom_version_lo; 162 u8 seeprom_version_lo;
163 u8 seeprom_version_hi; 163 u8 seeprom_version_hi;
164 u8 rsvd0[2]; 164 u8 rsvd0[2];
165 u32 fw_cmd_data_struct_version; 165 u32 fw_cmd_data_struct_version;
166 u32 ep_fw_data_struct_version; 166 u32 ep_fw_data_struct_version;
167 u32 future_reserved[12]; 167 u8 ncsi_version_string[12];
168 u32 default_extended_timeout; 168 u32 default_extended_timeout;
169 u8 controller_model_number[32]; 169 u8 controller_model_number[BEISCSI_VER_STRLEN];
170 u8 controller_description[64]; 170 u8 controller_description[64];
171 u8 controller_serial_number[32]; 171 u8 controller_serial_number[BEISCSI_VER_STRLEN];
172 u8 ip_version_string[32]; 172 u8 ip_version_string[BEISCSI_VER_STRLEN];
173 u8 firmware_version_string[32]; 173 u8 firmware_version_string[BEISCSI_VER_STRLEN];
174 u8 bios_version_string[32]; 174 u8 bios_version_string[BEISCSI_VER_STRLEN];
175 u8 redboot_version_string[32]; 175 u8 redboot_version_string[BEISCSI_VER_STRLEN];
176 u8 driver_version_string[32]; 176 u8 driver_version_string[BEISCSI_VER_STRLEN];
177 u8 fw_on_flash_version_string[32]; 177 u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
178 u32 functionalities_supported; 178 u32 functionalities_supported;
179 u16 max_cdblength; 179 u16 max_cdblength;
180 u8 asic_revision; 180 u8 asic_revision;
@@ -190,7 +190,8 @@ struct mgmt_hba_attributes {
190 u32 firmware_post_status; 190 u32 firmware_post_status;
191 u32 hba_mtu[8]; 191 u32 hba_mtu[8];
192 u8 iscsi_features; 192 u8 iscsi_features;
193 u8 future_u8[3]; 193 u8 asic_generation;
194 u8 future_u8[2];
194 u32 future_u32[3]; 195 u32 future_u32[3];
195} __packed; 196} __packed;
196 197
@@ -207,7 +208,7 @@ struct mgmt_controller_attributes {
207 u64 unique_identifier; 208 u64 unique_identifier;
208 u8 netfilters; 209 u8 netfilters;
209 u8 rsvd0[3]; 210 u8 rsvd0[3];
210 u8 future_u32[4]; 211 u32 future_u32[4];
211} __packed; 212} __packed;
212 213
213struct be_mgmt_controller_attributes { 214struct be_mgmt_controller_attributes {
@@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
311ssize_t beiscsi_drvr_ver_disp(struct device *dev, 312ssize_t beiscsi_drvr_ver_disp(struct device *dev,
312 struct device_attribute *attr, char *buf); 313 struct device_attribute *attr, char *buf);
313 314
315ssize_t beiscsi_fw_ver_disp(struct device *dev,
316 struct device_attribute *attr, char *buf);
317
318ssize_t beiscsi_active_cid_disp(struct device *dev,
319 struct device_attribute *attr, char *buf);
320
314ssize_t beiscsi_adap_family_disp(struct device *dev, 321ssize_t beiscsi_adap_family_disp(struct device *dev,
315 struct device_attribute *attr, char *buf); 322 struct device_attribute *attr, char *buf);
316 323
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 11596b2c4702..08b22a901c25 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
2#define _BNX2FC_H_ 2#define _BNX2FC_H_
3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. 3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
4 * 4 *
5 * Copyright (c) 2008 - 2011 Broadcom Corporation 5 * Copyright (c) 2008 - 2013 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -64,10 +64,12 @@
64#include "bnx2fc_constants.h" 64#include "bnx2fc_constants.h"
65 65
66#define BNX2FC_NAME "bnx2fc" 66#define BNX2FC_NAME "bnx2fc"
67#define BNX2FC_VERSION "1.0.13" 67#define BNX2FC_VERSION "1.0.14"
68 68
69#define PFX "bnx2fc: " 69#define PFX "bnx2fc: "
70 70
71#define BCM_CHIP_LEN 16
72
71#define BNX2X_DOORBELL_PCI_BAR 2 73#define BNX2X_DOORBELL_PCI_BAR 2
72 74
73#define BNX2FC_MAX_BD_LEN 0xffff 75#define BNX2FC_MAX_BD_LEN 0xffff
@@ -241,6 +243,8 @@ struct bnx2fc_hba {
241 int wait_for_link_down; 243 int wait_for_link_down;
242 int num_ofld_sess; 244 int num_ofld_sess;
243 struct list_head vports; 245 struct list_head vports;
246
247 char chip_num[BCM_CHIP_LEN];
244}; 248};
245 249
246struct bnx2fc_interface { 250struct bnx2fc_interface {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index bdbbb13b8534..b1c9a4f8caee 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
3 * This file contains helper routines that handle ELS requests 3 * This file contains helper routines that handle ELS requests
4 * and responses. 4 * and responses.
5 * 5 *
6 * Copyright (c) 2008 - 2011 Broadcom Corporation 6 * Copyright (c) 2008 - 2013 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7dffec1e5715..69ac55495c1d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
3 * cnic modules to create FCoE instances, send/receive non-offloaded 3 * cnic modules to create FCoE instances, send/receive non-offloaded
4 * FIP/FCoE packets, listen to link events etc. 4 * FIP/FCoE packets, listen to link events etc.
5 * 5 *
6 * Copyright (c) 2008 - 2011 Broadcom Corporation 6 * Copyright (c) 2008 - 2013 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
22 22
23#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
24#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
25#define DRV_MODULE_RELDATE "Dec 21, 2012" 25#define DRV_MODULE_RELDATE "Mar 08, 2013"
26 26
27 27
28static char version[] = 28static char version[] =
@@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
679{ 679{
680 struct fcoe_port *port = lport_priv(lport); 680 struct fcoe_port *port = lport_priv(lport);
681 struct bnx2fc_interface *interface = port->priv; 681 struct bnx2fc_interface *interface = port->priv;
682 struct bnx2fc_hba *hba = interface->hba;
682 struct Scsi_Host *shost = lport->host; 683 struct Scsi_Host *shost = lport->host;
683 int rc = 0; 684 int rc = 0;
684 685
@@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
699 } 700 }
700 if (!lport->vport) 701 if (!lport->vport)
701 fc_host_max_npiv_vports(lport->host) = USHRT_MAX; 702 fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
702 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", 703 snprintf(fc_host_symbolic_name(lport->host), 256,
703 BNX2FC_NAME, BNX2FC_VERSION, 704 "%s (Broadcom %s) v%s over %s",
705 BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
704 interface->netdev->name); 706 interface->netdev->name);
705 707
706 return 0; 708 return 0;
@@ -1656,23 +1658,60 @@ mem_err:
1656static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) 1658static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
1657{ 1659{
1658 struct cnic_dev *cnic; 1660 struct cnic_dev *cnic;
1661 struct pci_dev *pdev;
1659 1662
1660 if (!hba->cnic) { 1663 if (!hba->cnic) {
1661 printk(KERN_ERR PFX "cnic is NULL\n"); 1664 printk(KERN_ERR PFX "cnic is NULL\n");
1662 return -ENODEV; 1665 return -ENODEV;
1663 } 1666 }
1664 cnic = hba->cnic; 1667 cnic = hba->cnic;
1665 hba->pcidev = cnic->pcidev; 1668 pdev = hba->pcidev = cnic->pcidev;
1666 if (hba->pcidev) 1669 if (!hba->pcidev)
1667 pci_dev_get(hba->pcidev); 1670 return -ENODEV;
1668 1671
1672 switch (pdev->device) {
1673 case PCI_DEVICE_ID_NX2_57710:
1674 strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
1675 break;
1676 case PCI_DEVICE_ID_NX2_57711:
1677 strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
1678 break;
1679 case PCI_DEVICE_ID_NX2_57712:
1680 case PCI_DEVICE_ID_NX2_57712_MF:
1681 case PCI_DEVICE_ID_NX2_57712_VF:
1682 strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
1683 break;
1684 case PCI_DEVICE_ID_NX2_57800:
1685 case PCI_DEVICE_ID_NX2_57800_MF:
1686 case PCI_DEVICE_ID_NX2_57800_VF:
1687 strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
1688 break;
1689 case PCI_DEVICE_ID_NX2_57810:
1690 case PCI_DEVICE_ID_NX2_57810_MF:
1691 case PCI_DEVICE_ID_NX2_57810_VF:
1692 strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
1693 break;
1694 case PCI_DEVICE_ID_NX2_57840:
1695 case PCI_DEVICE_ID_NX2_57840_MF:
1696 case PCI_DEVICE_ID_NX2_57840_VF:
1697 case PCI_DEVICE_ID_NX2_57840_2_20:
1698 case PCI_DEVICE_ID_NX2_57840_4_10:
1699 strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
1700 break;
1701 default:
1702 pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
1703 break;
1704 }
1705 pci_dev_get(hba->pcidev);
1669 return 0; 1706 return 0;
1670} 1707}
1671 1708
1672static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) 1709static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1673{ 1710{
1674 if (hba->pcidev) 1711 if (hba->pcidev) {
1712 hba->chip_num[0] = '\0';
1675 pci_dev_put(hba->pcidev); 1713 pci_dev_put(hba->pcidev);
1714 }
1676 hba->pcidev = NULL; 1715 hba->pcidev = NULL;
1677} 1716}
1678 1717
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 50510ffe1bf5..c0d035a8f8f9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
2 * This file contains the code that low level functions that interact 2 * This file contains the code that low level functions that interact
3 * with 57712 FCoE firmware. 3 * with 57712 FCoE firmware.
4 * 4 *
5 * Copyright (c) 2008 - 2011 Broadcom Corporation 5 * Copyright (c) 2008 - 2013 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
126 fcoe_init3.error_bit_map_lo = 0xffffffff; 126 fcoe_init3.error_bit_map_lo = 0xffffffff;
127 fcoe_init3.error_bit_map_hi = 0xffffffff; 127 fcoe_init3.error_bit_map_hi = 0xffffffff;
128 128
129 fcoe_init3.perf_config = 1; 129 /*
130 * enable both cached connection and cached tasks
131 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
132 */
133 fcoe_init3.perf_config = 3;
130 134
131 kwqe_arr[0] = (struct kwqe *) &fcoe_init1; 135 kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
132 kwqe_arr[1] = (struct kwqe *) &fcoe_init2; 136 kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 723a9a8ba5ee..575142e92d9c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. 1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * IO manager and SCSI IO processing. 2 * IO manager and SCSI IO processing.
3 * 3 *
4 * Copyright (c) 2008 - 2011 Broadcom Corporation 4 * Copyright (c) 2008 - 2013 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1270 1270
1271 spin_lock_bh(&tgt->tgt_lock); 1271 spin_lock_bh(&tgt->tgt_lock);
1272 io_req->wait_for_comp = 0; 1272 io_req->wait_for_comp = 0;
1273 if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, 1273 if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
1274 &io_req->req_flags))) { 1274 BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
1275 rc = SUCCESS;
1276 } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1277 &io_req->req_flags))) {
1275 /* Let the scsi-ml try to recover this command */ 1278 /* Let the scsi-ml try to recover this command */
1276 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", 1279 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1277 io_req->xid); 1280 io_req->xid);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c57a3bb8a9fb..4d93177dfb53 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
2 * Handles operations such as session offload/upload etc, and manages 2 * Handles operations such as session offload/upload etc, and manages
3 * session resources such as connection id and qp resources. 3 * session resources such as connection id and qp resources.
4 * 4 *
5 * Copyright (c) 2008 - 2011 Broadcom Corporation 5 * Copyright (c) 2008 - 2013 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 0f9c04175b11..372a67d122d3 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
114 uint32_t n_rnode_match; /* matched rnode */ 114 uint32_t n_rnode_match; /* matched rnode */
115 uint32_t n_dev_loss_tmo; /* Device loss timeout */ 115 uint32_t n_dev_loss_tmo; /* Device loss timeout */
116 uint32_t n_fdmi_err; /* fdmi err */ 116 uint32_t n_fdmi_err; /* fdmi err */
117 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ 117 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ 118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
119 uint32_t n_rnode_alloc; /* rnode allocated */ 119 uint32_t n_rnode_alloc; /* rnode allocated */
120 uint32_t n_rnode_free; /* rnode freed */ 120 uint32_t n_rnode_free; /* rnode freed */
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
index 65940096a80d..433434221222 100644
--- a/drivers/scsi/csiostor/csio_rnode.h
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
63 uint32_t n_err_nomem; /* error nomem */ 63 uint32_t n_err_nomem; /* error nomem */
64 uint32_t n_evt_unexp; /* unexpected event */ 64 uint32_t n_evt_unexp; /* unexpected event */
65 uint32_t n_evt_drop; /* unexpected event */ 65 uint32_t n_evt_drop; /* unexpected event */
66 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ 66 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ 67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
68 uint32_t n_lun_rst; /* Number of resets of 68 uint32_t n_lun_rst; /* Number of resets of
69 * of LUNs under this 69 * of LUNs under this
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 98436c363035..b6d1f92ed33c 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
38 38
39#define DRV_NAME "fnic" 39#define DRV_NAME "fnic"
40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 40#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
41#define DRV_VERSION "1.5.0.2" 41#define DRV_VERSION "1.5.0.22"
42#define PFX DRV_NAME ": " 42#define PFX DRV_NAME ": "
43#define DFX DRV_NAME "%d: " 43#define DFX DRV_NAME "%d: "
44 44
@@ -192,6 +192,18 @@ enum fnic_state {
192 192
193struct mempool; 193struct mempool;
194 194
195enum fnic_evt {
196 FNIC_EVT_START_VLAN_DISC = 1,
197 FNIC_EVT_START_FCF_DISC = 2,
198 FNIC_EVT_MAX,
199};
200
201struct fnic_event {
202 struct list_head list;
203 struct fnic *fnic;
204 enum fnic_evt event;
205};
206
195/* Per-instance private data structure */ 207/* Per-instance private data structure */
196struct fnic { 208struct fnic {
197 struct fc_lport *lport; 209 struct fc_lport *lport;
@@ -254,6 +266,18 @@ struct fnic {
254 struct sk_buff_head frame_queue; 266 struct sk_buff_head frame_queue;
255 struct sk_buff_head tx_queue; 267 struct sk_buff_head tx_queue;
256 268
269 /*** FIP related data members -- start ***/
270 void (*set_vlan)(struct fnic *, u16 vlan);
271 struct work_struct fip_frame_work;
272 struct sk_buff_head fip_frame_queue;
273 struct timer_list fip_timer;
274 struct list_head vlans;
275 spinlock_t vlans_lock;
276
277 struct work_struct event_work;
278 struct list_head evlist;
279 /*** FIP related data members -- end ***/
280
257 /* copy work queue cache line section */ 281 /* copy work queue cache line section */
258 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; 282 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
259 /* completion queue cache line section */ 283 /* completion queue cache line section */
@@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
278} 302}
279 303
280extern struct workqueue_struct *fnic_event_queue; 304extern struct workqueue_struct *fnic_event_queue;
305extern struct workqueue_struct *fnic_fip_queue;
281extern struct device_attribute *fnic_attrs[]; 306extern struct device_attribute *fnic_attrs[];
282 307
283void fnic_clear_intr_mode(struct fnic *fnic); 308void fnic_clear_intr_mode(struct fnic *fnic);
@@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *);
289void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); 314void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
290void fnic_handle_frame(struct work_struct *work); 315void fnic_handle_frame(struct work_struct *work);
291void fnic_handle_link(struct work_struct *work); 316void fnic_handle_link(struct work_struct *work);
317void fnic_handle_event(struct work_struct *work);
292int fnic_rq_cmpl_handler(struct fnic *fnic, int); 318int fnic_rq_cmpl_handler(struct fnic *fnic, int);
293int fnic_alloc_rq_frame(struct vnic_rq *rq); 319int fnic_alloc_rq_frame(struct vnic_rq *rq);
294void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); 320void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic);
321 347
322int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); 348int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
323 349
350void fnic_handle_fip_frame(struct work_struct *work);
351void fnic_handle_fip_event(struct fnic *fnic);
352void fnic_fcoe_reset_vlans(struct fnic *fnic);
353void fnic_fcoe_evlist_free(struct fnic *fnic);
354extern void fnic_handle_fip_timer(struct fnic *fnic);
355
324static inline int 356static inline int
325fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) 357fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
326{ 358{
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 483eb9dbe663..006fa92a02df 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -31,12 +31,20 @@
31#include <scsi/libfc.h> 31#include <scsi/libfc.h>
32#include "fnic_io.h" 32#include "fnic_io.h"
33#include "fnic.h" 33#include "fnic.h"
34#include "fnic_fip.h"
34#include "cq_enet_desc.h" 35#include "cq_enet_desc.h"
35#include "cq_exch_desc.h" 36#include "cq_exch_desc.h"
36 37
38static u8 fcoe_all_fcfs[ETH_ALEN];
39struct workqueue_struct *fnic_fip_queue;
37struct workqueue_struct *fnic_event_queue; 40struct workqueue_struct *fnic_event_queue;
38 41
39static void fnic_set_eth_mode(struct fnic *); 42static void fnic_set_eth_mode(struct fnic *);
43static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
44static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
45static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
46static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
47static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
40 48
41void fnic_handle_link(struct work_struct *work) 49void fnic_handle_link(struct work_struct *work)
42{ 50{
@@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work)
69 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 77 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
70 "link down\n"); 78 "link down\n");
71 fcoe_ctlr_link_down(&fnic->ctlr); 79 fcoe_ctlr_link_down(&fnic->ctlr);
80 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
81 /* start FCoE VLAN discovery */
82 fnic_fcoe_send_vlan_req(fnic);
83 return;
84 }
72 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 85 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
73 "link up\n"); 86 "link up\n");
74 fcoe_ctlr_link_up(&fnic->ctlr); 87 fcoe_ctlr_link_up(&fnic->ctlr);
@@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work)
79 } else if (fnic->link_status) { 92 } else if (fnic->link_status) {
80 /* DOWN -> UP */ 93 /* DOWN -> UP */
81 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 94 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
95 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
96 /* start FCoE VLAN discovery */
97 fnic_fcoe_send_vlan_req(fnic);
98 return;
99 }
82 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); 100 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
83 fcoe_ctlr_link_up(&fnic->ctlr); 101 fcoe_ctlr_link_up(&fnic->ctlr);
84 } else { 102 } else {
@@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work)
128 } 146 }
129} 147}
130 148
149void fnic_fcoe_evlist_free(struct fnic *fnic)
150{
151 struct fnic_event *fevt = NULL;
152 struct fnic_event *next = NULL;
153 unsigned long flags;
154
155 spin_lock_irqsave(&fnic->fnic_lock, flags);
156 if (list_empty(&fnic->evlist)) {
157 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
158 return;
159 }
160
161 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
162 list_del(&fevt->list);
163 kfree(fevt);
164 }
165 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
166}
167
168void fnic_handle_event(struct work_struct *work)
169{
170 struct fnic *fnic = container_of(work, struct fnic, event_work);
171 struct fnic_event *fevt = NULL;
172 struct fnic_event *next = NULL;
173 unsigned long flags;
174
175 spin_lock_irqsave(&fnic->fnic_lock, flags);
176 if (list_empty(&fnic->evlist)) {
177 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
178 return;
179 }
180
181 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
182 if (fnic->stop_rx_link_events) {
183 list_del(&fevt->list);
184 kfree(fevt);
185 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
186 return;
187 }
188 /*
189 * If we're in a transitional state, just re-queue and return.
190 * The queue will be serviced when we get to a stable state.
191 */
192 if (fnic->state != FNIC_IN_FC_MODE &&
193 fnic->state != FNIC_IN_ETH_MODE) {
194 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
195 return;
196 }
197
198 list_del(&fevt->list);
199 switch (fevt->event) {
200 case FNIC_EVT_START_VLAN_DISC:
201 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
202 fnic_fcoe_send_vlan_req(fnic);
203 spin_lock_irqsave(&fnic->fnic_lock, flags);
204 break;
205 case FNIC_EVT_START_FCF_DISC:
206 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
207 "Start FCF Discovery\n");
208 fnic_fcoe_start_fcf_disc(fnic);
209 break;
210 default:
211 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
212 "Unknown event 0x%x\n", fevt->event);
213 break;
214 }
215 kfree(fevt);
216 }
217 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
218}
219
220/**
221 * Check if the Received FIP FLOGI frame is rejected
222 * @fip: The FCoE controller that received the frame
223 * @skb: The received FIP frame
224 *
225 * Returns non-zero if the frame is rejected with unsupported cmd with
226 * insufficient resource els explanation.
227 */
228static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
229 struct sk_buff *skb)
230{
231 struct fc_lport *lport = fip->lp;
232 struct fip_header *fiph;
233 struct fc_frame_header *fh = NULL;
234 struct fip_desc *desc;
235 struct fip_encaps *els;
236 enum fip_desc_type els_dtype = 0;
237 u16 op;
238 u8 els_op;
239 u8 sub;
240
241 size_t els_len = 0;
242 size_t rlen;
243 size_t dlen = 0;
244
245 if (skb_linearize(skb))
246 return 0;
247
248 if (skb->len < sizeof(*fiph))
249 return 0;
250
251 fiph = (struct fip_header *)skb->data;
252 op = ntohs(fiph->fip_op);
253 sub = fiph->fip_subcode;
254
255 if (op != FIP_OP_LS)
256 return 0;
257
258 if (sub != FIP_SC_REP)
259 return 0;
260
261 rlen = ntohs(fiph->fip_dl_len) * 4;
262 if (rlen + sizeof(*fiph) > skb->len)
263 return 0;
264
265 desc = (struct fip_desc *)(fiph + 1);
266 dlen = desc->fip_dlen * FIP_BPW;
267
268 if (desc->fip_dtype == FIP_DT_FLOGI) {
269
270 shost_printk(KERN_DEBUG, lport->host,
271 " FIP TYPE FLOGI: fab name:%llx "
272 "vfid:%d map:%x\n",
273 fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
274 fip->sel_fcf->fc_map);
275 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
276 return 0;
277
278 els_len = dlen - sizeof(*els);
279 els = (struct fip_encaps *)desc;
280 fh = (struct fc_frame_header *)(els + 1);
281 els_dtype = desc->fip_dtype;
282
283 if (!fh)
284 return 0;
285
286 /*
287 * ELS command code, reason and explanation should be = Reject,
288 * unsupported command and insufficient resource
289 */
290 els_op = *(u8 *)(fh + 1);
291 if (els_op == ELS_LS_RJT) {
292 shost_printk(KERN_INFO, lport->host,
293 "Flogi Request Rejected by Switch\n");
294 return 1;
295 }
296 shost_printk(KERN_INFO, lport->host,
297 "Flogi Request Accepted by Switch\n");
298 }
299 return 0;
300}
301
302static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
303{
304 struct fcoe_ctlr *fip = &fnic->ctlr;
305 struct sk_buff *skb;
306 char *eth_fr;
307 int fr_len;
308 struct fip_vlan *vlan;
309 u64 vlan_tov;
310
311 fnic_fcoe_reset_vlans(fnic);
312 fnic->set_vlan(fnic, 0);
313 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
314 "Sending VLAN request...\n");
315 skb = dev_alloc_skb(sizeof(struct fip_vlan));
316 if (!skb)
317 return;
318
319 fr_len = sizeof(*vlan);
320 eth_fr = (char *)skb->data;
321 vlan = (struct fip_vlan *)eth_fr;
322
323 memset(vlan, 0, sizeof(*vlan));
324 memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
325 memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
326 vlan->eth.h_proto = htons(ETH_P_FIP);
327
328 vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
329 vlan->fip.fip_op = htons(FIP_OP_VLAN);
330 vlan->fip.fip_subcode = FIP_SC_VL_REQ;
331 vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
332
333 vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
334 vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
335 memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
336
337 vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
338 vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
339 put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
340
341 skb_put(skb, sizeof(*vlan));
342 skb->protocol = htons(ETH_P_FIP);
343 skb_reset_mac_header(skb);
344 skb_reset_network_header(skb);
345 fip->send(fip, skb);
346
347 /* set a timer so that we can retry if there no response */
348 vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
349 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
350}
351
352static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
353{
354 struct fcoe_ctlr *fip = &fnic->ctlr;
355 struct fip_header *fiph;
356 struct fip_desc *desc;
357 u16 vid;
358 size_t rlen;
359 size_t dlen;
360 struct fcoe_vlan *vlan;
361 u64 sol_time;
362 unsigned long flags;
363
364 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
365 "Received VLAN response...\n");
366
367 fiph = (struct fip_header *) skb->data;
368
369 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
370 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
371 ntohs(fiph->fip_op), fiph->fip_subcode);
372
373 rlen = ntohs(fiph->fip_dl_len) * 4;
374 fnic_fcoe_reset_vlans(fnic);
375 spin_lock_irqsave(&fnic->vlans_lock, flags);
376 desc = (struct fip_desc *)(fiph + 1);
377 while (rlen > 0) {
378 dlen = desc->fip_dlen * FIP_BPW;
379 switch (desc->fip_dtype) {
380 case FIP_DT_VLAN:
381 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
382 shost_printk(KERN_INFO, fnic->lport->host,
383 "process_vlan_resp: FIP VLAN %d\n", vid);
384 vlan = kmalloc(sizeof(*vlan),
385 GFP_ATOMIC);
386 if (!vlan) {
387 /* retry from timer */
388 spin_unlock_irqrestore(&fnic->vlans_lock,
389 flags);
390 goto out;
391 }
392 memset(vlan, 0, sizeof(struct fcoe_vlan));
393 vlan->vid = vid & 0x0fff;
394 vlan->state = FIP_VLAN_AVAIL;
395 list_add_tail(&vlan->list, &fnic->vlans);
396 break;
397 }
398 desc = (struct fip_desc *)((char *)desc + dlen);
399 rlen -= dlen;
400 }
401
402 /* any VLAN descriptors present ? */
403 if (list_empty(&fnic->vlans)) {
404 /* retry from timer */
405 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
406 "No VLAN descriptors in FIP VLAN response\n");
407 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
408 goto out;
409 }
410
411 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
412 fnic->set_vlan(fnic, vlan->vid);
413 vlan->state = FIP_VLAN_SENT; /* sent now */
414 vlan->sol_count++;
415 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
416
417 /* start the solicitation */
418 fcoe_ctlr_link_up(fip);
419
420 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
421 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
422out:
423 return;
424}
425
426static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
427{
428 unsigned long flags;
429 struct fcoe_vlan *vlan;
430 u64 sol_time;
431
432 spin_lock_irqsave(&fnic->vlans_lock, flags);
433 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
434 fnic->set_vlan(fnic, vlan->vid);
435 vlan->state = FIP_VLAN_SENT; /* sent now */
436 vlan->sol_count = 1;
437 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
438
439 /* start the solicitation */
440 fcoe_ctlr_link_up(&fnic->ctlr);
441
442 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
443 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
444}
445
446static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
447{
448 unsigned long flags;
449 struct fcoe_vlan *fvlan;
450
451 spin_lock_irqsave(&fnic->vlans_lock, flags);
452 if (list_empty(&fnic->vlans)) {
453 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
454 return -EINVAL;
455 }
456
457 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
458 if (fvlan->state == FIP_VLAN_USED) {
459 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
460 return 0;
461 }
462
463 if (fvlan->state == FIP_VLAN_SENT) {
464 fvlan->state = FIP_VLAN_USED;
465 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
466 return 0;
467 }
468 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
469 return -EINVAL;
470}
471
472static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
473{
474 struct fnic_event *fevt;
475 unsigned long flags;
476
477 fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
478 if (!fevt)
479 return;
480
481 fevt->fnic = fnic;
482 fevt->event = ev;
483
484 spin_lock_irqsave(&fnic->fnic_lock, flags);
485 list_add_tail(&fevt->list, &fnic->evlist);
486 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
487
488 schedule_work(&fnic->event_work);
489}
490
491static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
492{
493 struct fip_header *fiph;
494 int ret = 1;
495 u16 op;
496 u8 sub;
497
498 if (!skb || !(skb->data))
499 return -1;
500
501 if (skb_linearize(skb))
502 goto drop;
503
504 fiph = (struct fip_header *)skb->data;
505 op = ntohs(fiph->fip_op);
506 sub = fiph->fip_subcode;
507
508 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
509 goto drop;
510
511 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
512 goto drop;
513
514 if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
515 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
516 goto drop;
517 /* pass it on to fcoe */
518 ret = 1;
519 } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
520 /* set the vlan as used */
521 fnic_fcoe_process_vlan_resp(fnic, skb);
522 ret = 0;
523 } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
524 /* received CVL request, restart vlan disc */
525 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
526 /* pass it on to fcoe */
527 ret = 1;
528 }
529drop:
530 return ret;
531}
532
533void fnic_handle_fip_frame(struct work_struct *work)
534{
535 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
536 unsigned long flags;
537 struct sk_buff *skb;
538 struct ethhdr *eh;
539
540 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
541 spin_lock_irqsave(&fnic->fnic_lock, flags);
542 if (fnic->stop_rx_link_events) {
543 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
544 dev_kfree_skb(skb);
545 return;
546 }
547 /*
548 * If we're in a transitional state, just re-queue and return.
549 * The queue will be serviced when we get to a stable state.
550 */
551 if (fnic->state != FNIC_IN_FC_MODE &&
552 fnic->state != FNIC_IN_ETH_MODE) {
553 skb_queue_head(&fnic->fip_frame_queue, skb);
554 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
555 return;
556 }
557 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
558 eh = (struct ethhdr *)skb->data;
559 if (eh->h_proto == htons(ETH_P_FIP)) {
560 skb_pull(skb, sizeof(*eh));
561 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
562 dev_kfree_skb(skb);
563 continue;
564 }
565 /*
566 * If there's FLOGI rejects - clear all
567 * fcf's & restart from scratch
568 */
569 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
570 shost_printk(KERN_INFO, fnic->lport->host,
571 "Trigger a Link down - VLAN Disc\n");
572 fcoe_ctlr_link_down(&fnic->ctlr);
573 /* start FCoE VLAN discovery */
574 fnic_fcoe_send_vlan_req(fnic);
575 dev_kfree_skb(skb);
576 continue;
577 }
578 fcoe_ctlr_recv(&fnic->ctlr, skb);
579 continue;
580 }
581 }
582}
583
131/** 584/**
132 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. 585 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
133 * @fnic: fnic instance. 586 * @fnic: fnic instance.
@@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
150 skb_reset_mac_header(skb); 603 skb_reset_mac_header(skb);
151 } 604 }
152 if (eh->h_proto == htons(ETH_P_FIP)) { 605 if (eh->h_proto == htons(ETH_P_FIP)) {
153 skb_pull(skb, sizeof(*eh)); 606 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
154 fcoe_ctlr_recv(&fnic->ctlr, skb); 607 printk(KERN_ERR "Dropped FIP frame, as firmware "
608 "uses non-FIP mode, Enable FIP "
609 "using UCSM\n");
610 goto drop;
611 }
612 skb_queue_tail(&fnic->fip_frame_queue, skb);
613 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
155 return 1; /* let caller know packet was used */ 614 return 1; /* let caller know packet was used */
156 } 615 }
157 if (eh->h_proto != htons(ETH_P_FCOE)) 616 if (eh->h_proto != htons(ETH_P_FCOE))
@@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
720 dev_kfree_skb(fp_skb(fp)); 1179 dev_kfree_skb(fp_skb(fp));
721 buf->os_buf = NULL; 1180 buf->os_buf = NULL;
722} 1181}
1182
1183void fnic_fcoe_reset_vlans(struct fnic *fnic)
1184{
1185 unsigned long flags;
1186 struct fcoe_vlan *vlan;
1187 struct fcoe_vlan *next;
1188
1189 /*
1190 * indicate a link down to fcoe so that all fcf's are free'd
1191 * might not be required since we did this before sending vlan
1192 * discovery request
1193 */
1194 spin_lock_irqsave(&fnic->vlans_lock, flags);
1195 if (!list_empty(&fnic->vlans)) {
1196 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
1197 list_del(&vlan->list);
1198 kfree(vlan);
1199 }
1200 }
1201 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1202}
1203
1204void fnic_handle_fip_timer(struct fnic *fnic)
1205{
1206 unsigned long flags;
1207 struct fcoe_vlan *vlan;
1208 u64 sol_time;
1209
1210 spin_lock_irqsave(&fnic->fnic_lock, flags);
1211 if (fnic->stop_rx_link_events) {
1212 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1213 return;
1214 }
1215 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1216
1217 if (fnic->ctlr.mode == FIP_ST_NON_FIP)
1218 return;
1219
1220 spin_lock_irqsave(&fnic->vlans_lock, flags);
1221 if (list_empty(&fnic->vlans)) {
1222 /* no vlans available, try again */
1223 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1224 "Start VLAN Discovery\n");
1225 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1226 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1227 return;
1228 }
1229
1230 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
1231 shost_printk(KERN_DEBUG, fnic->lport->host,
1232 "fip_timer: vlan %d state %d sol_count %d\n",
1233 vlan->vid, vlan->state, vlan->sol_count);
1234 switch (vlan->state) {
1235 case FIP_VLAN_USED:
1236 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1237 "FIP VLAN is selected for FC transaction\n");
1238 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1239 break;
1240 case FIP_VLAN_FAILED:
1241 /* if all vlans are in failed state, restart vlan disc */
1242 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1243 "Start VLAN Discovery\n");
1244 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1245 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1246 break;
1247 case FIP_VLAN_SENT:
1248 if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
1249 /*
1250 * no response on this vlan, remove from the list.
1251 * Try the next vlan
1252 */
1253 shost_printk(KERN_INFO, fnic->lport->host,
1254 "Dequeue this VLAN ID %d from list\n",
1255 vlan->vid);
1256 list_del(&vlan->list);
1257 kfree(vlan);
1258 vlan = NULL;
1259 if (list_empty(&fnic->vlans)) {
1260 /* we exhausted all vlans, restart vlan disc */
1261 spin_unlock_irqrestore(&fnic->vlans_lock,
1262 flags);
1263 shost_printk(KERN_INFO, fnic->lport->host,
1264 "fip_timer: vlan list empty, "
1265 "trigger vlan disc\n");
1266 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1267 return;
1268 }
1269 /* check the next vlan */
1270 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
1271 list);
1272 fnic->set_vlan(fnic, vlan->vid);
1273 vlan->state = FIP_VLAN_SENT; /* sent now */
1274 }
1275 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1276 vlan->sol_count++;
1277 sol_time = jiffies + msecs_to_jiffies
1278 (FCOE_CTLR_START_DELAY);
1279 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
1280 break;
1281 }
1282}
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644
index 000000000000..87e74c2ab971
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#ifndef _FNIC_FIP_H_
20#define _FNIC_FIP_H_
21
22
23#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
24#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
25#define FCOE_CTLR_MAX_SOL 8
26
27#define FINC_MAX_FLOGI_REJECTS 8
28
29/*
30 * FIP_DT_VLAN descriptor.
31 */
32struct fip_vlan_desc {
33 struct fip_desc fd_desc;
34 __be16 fd_vlan;
35} __attribute__((packed));
36
37struct vlan {
38 __be16 vid;
39 __be16 type;
40};
41
42/*
43 * VLAN entry.
44 */
45struct fcoe_vlan {
46 struct list_head list;
47 u16 vid; /* vlan ID */
48 u16 sol_count; /* no. of sols sent */
49 u16 state; /* state */
50};
51
52enum fip_vlan_state {
53 FIP_VLAN_AVAIL = 0, /* don't do anything */
54 FIP_VLAN_SENT = 1, /* sent */
55 FIP_VLAN_USED = 2, /* succeed */
56 FIP_VLAN_FAILED = 3, /* failed to response */
57};
58
59struct fip_vlan {
60 struct ethhdr eth;
61 struct fip_header fip;
62 struct {
63 struct fip_mac_desc mac;
64 struct fip_wwn_desc wwnn;
65 } desc;
66};
67
68#endif /* __FINC_FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index d601ac543c52..5f09d1814d26 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -39,6 +39,7 @@
39#include "vnic_intr.h" 39#include "vnic_intr.h"
40#include "vnic_stats.h" 40#include "vnic_stats.h"
41#include "fnic_io.h" 41#include "fnic_io.h"
42#include "fnic_fip.h"
42#include "fnic.h" 43#include "fnic.h"
43 44
44#define PCI_DEVICE_ID_CISCO_FNIC 0x0045 45#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
@@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data)
292 round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); 293 round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
293} 294}
294 295
296static void fnic_fip_notify_timer(unsigned long data)
297{
298 struct fnic *fnic = (struct fnic *)data;
299
300 fnic_handle_fip_timer(fnic);
301}
302
295static void fnic_notify_timer_start(struct fnic *fnic) 303static void fnic_notify_timer_start(struct fnic *fnic)
296{ 304{
297 switch (vnic_dev_get_intr_mode(fnic->vdev)) { 305 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
403 return fnic->data_src_addr; 411 return fnic->data_src_addr;
404} 412}
405 413
414static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
415{
416 u16 old_vlan;
417 old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
418}
419
406static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 420static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
407{ 421{
408 struct Scsi_Host *host; 422 struct Scsi_Host *host;
@@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
620 vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); 634 vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
621 vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); 635 vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
622 vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); 636 vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
637 fnic->set_vlan = fnic_set_vlan;
623 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); 638 fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
639 setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
640 (unsigned long)fnic);
641 spin_lock_init(&fnic->vlans_lock);
642 INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
643 INIT_WORK(&fnic->event_work, fnic_handle_event);
644 skb_queue_head_init(&fnic->fip_frame_queue);
645 spin_lock_irqsave(&fnic_list_lock, flags);
646 if (!fnic_fip_queue) {
647 fnic_fip_queue =
648 create_singlethread_workqueue("fnic_fip_q");
649 if (!fnic_fip_queue) {
650 spin_unlock_irqrestore(&fnic_list_lock, flags);
651 printk(KERN_ERR PFX "fnic FIP work queue "
652 "create failed\n");
653 err = -ENOMEM;
654 goto err_out_free_max_pool;
655 }
656 }
657 spin_unlock_irqrestore(&fnic_list_lock, flags);
658 INIT_LIST_HEAD(&fnic->evlist);
659 INIT_LIST_HEAD(&fnic->vlans);
624 } else { 660 } else {
625 shost_printk(KERN_INFO, fnic->lport->host, 661 shost_printk(KERN_INFO, fnic->lport->host,
626 "firmware uses non-FIP mode\n"); 662 "firmware uses non-FIP mode\n");
@@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev)
807 skb_queue_purge(&fnic->frame_queue); 843 skb_queue_purge(&fnic->frame_queue);
808 skb_queue_purge(&fnic->tx_queue); 844 skb_queue_purge(&fnic->tx_queue);
809 845
846 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
847 del_timer_sync(&fnic->fip_timer);
848 skb_queue_purge(&fnic->fip_frame_queue);
849 fnic_fcoe_reset_vlans(fnic);
850 fnic_fcoe_evlist_free(fnic);
851 }
852
810 /* 853 /*
811 * Log off the fabric. This stops all remote ports, dns port, 854 * Log off the fabric. This stops all remote ports, dns port,
812 * logs off the fabric. This flushes all rport, disc, lport work 855 * logs off the fabric. This flushes all rport, disc, lport work
@@ -889,8 +932,8 @@ static int __init fnic_init_module(void)
889 len = sizeof(struct fnic_sgl_list); 932 len = sizeof(struct fnic_sgl_list);
890 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create 933 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
891 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, 934 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
892 SLAB_HWCACHE_ALIGN, 935 SLAB_HWCACHE_ALIGN,
893 NULL); 936 NULL);
894 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { 937 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
895 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); 938 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
896 err = -ENOMEM; 939 err = -ENOMEM;
@@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void)
951{ 994{
952 pci_unregister_driver(&fnic_driver); 995 pci_unregister_driver(&fnic_driver);
953 destroy_workqueue(fnic_event_queue); 996 destroy_workqueue(fnic_event_queue);
997 if (fnic_fip_queue) {
998 flush_workqueue(fnic_fip_queue);
999 destroy_workqueue(fnic_fip_queue);
1000 }
954 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); 1001 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
955 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); 1002 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
956 kmem_cache_destroy(fnic_io_req_cache); 1003 kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index b576be734e2e..9795d6f3e197 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
584 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); 584 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
585} 585}
586 586
587u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
588{
589 u64 a0 = new_default_vlan, a1 = 0;
590 int wait = 1000;
591 int old_vlan = 0;
592
593 old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
594 return (u16)old_vlan;
595}
596
587int vnic_dev_link_status(struct vnic_dev *vdev) 597int vnic_dev_link_status(struct vnic_dev *vdev)
588{ 598{
589 if (vdev->linkstatus) 599 if (vdev->linkstatus)
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index f9935a8a5a09..40d4195f562b 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
148int vnic_dev_open(struct vnic_dev *vdev, int arg); 148int vnic_dev_open(struct vnic_dev *vdev, int arg);
149int vnic_dev_open_done(struct vnic_dev *vdev, int *done); 149int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
150int vnic_dev_init(struct vnic_dev *vdev, int arg); 150int vnic_dev_init(struct vnic_dev *vdev, int arg);
151u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
152 u16 new_default_vlan);
151int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); 153int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
152int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); 154int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
153void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 155void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index 7c9ccbd4134b..3e2fcbda6aed 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
196 196
197 /* undo initialize of virtual link */ 197 /* undo initialize of virtual link */
198 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), 198 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
199
200 /* check fw capability of a cmd:
201 * in: (u32)a0=cmd
202 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
203 CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
204
205 /* persistent binding info
206 * in: (u64)a0=paddr of arg
207 * (u32)a1=CMD_PERBI_XXX */
208 CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
209
210 /* Interrupt Assert Register functionality
211 * in: (u16)a0=interrupt number to assert
212 */
213 CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
214
215 /* initiate hangreset, like softreset after hang detected */
216 CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
217
218 /* hangreset status:
219 * out: a0=0 reset complete, a0=1 reset in progress */
220 CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
221
222 /*
223 * Set hw ingress packet vlan rewrite mode:
224 * in: (u32)a0=new vlan rewrite mode
225 * out: (u32)a0=old vlan rewrite mode */
226 CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
227
228 /*
229 * in: (u16)a0=bdf of target vnic
230 * (u32)a1=cmd to proxy
231 * a2-a15=args to cmd in a1
232 * out: (u32)a0=status of proxied cmd
233 * a1-a15=out args of proxied cmd */
234 CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
235
236 /*
237 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
238 * or SR-IOV virtual vnic
239 */
240 CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
241
242 /*
243 * For HPP toggle:
244 * adapter-info-get
245 * in: (u64)a0=phsical address of buffer passed in from caller.
246 * (u16)a1=size of buffer specified in a0.
247 * out: (u64)a0=phsical address of buffer passed in from caller.
248 * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
249 * 0 if no VIF-CONFIG-INFO TLV was ever received. */
250 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
251
252 /*
253 * INT13 API: (u64)a0=paddr to vnic_int13_params struct
254 * (u32)a1=INT13_CMD_xxx
255 */
256 CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
257
258 /*
259 * Set default vlan:
260 * in: (u16)a0=new default vlan
261 * (u16)a1=zero for overriding vlan with param a0,
262 * non-zero for resetting vlan to the default
263 * out: (u16)a0=old default vlan
264 */
265 CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
199}; 266};
200 267
201/* flags for CMD_OPEN */ 268/* flags for CMD_OPEN */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index cc82d0f322b6..4e31caa21ddf 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2179 return 0; 2179 return 0;
2180 } 2180 }
2181 2181
2182 if (vhost->state == IBMVFC_ACTIVE) { 2182 if (vhost->logged_in) {
2183 evt = ibmvfc_get_event(vhost); 2183 evt = ibmvfc_get_event(vhost);
2184 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); 2184 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2185 2185
@@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2190 tmf->common.length = sizeof(*tmf); 2190 tmf->common.length = sizeof(*tmf);
2191 tmf->scsi_id = rport->port_id; 2191 tmf->scsi_id = rport->port_id;
2192 int_to_scsilun(sdev->lun, &tmf->lun); 2192 int_to_scsilun(sdev->lun, &tmf->lun);
2193 tmf->flags = (type | IBMVFC_TMF_LUA_VALID); 2193 if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
2194 type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2195 if (vhost->state == IBMVFC_ACTIVE)
2196 tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
2197 else
2198 tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
2194 tmf->cancel_key = (unsigned long)sdev->hostdata; 2199 tmf->cancel_key = (unsigned long)sdev->hostdata;
2195 tmf->my_cancel_key = (unsigned long)starget->hostdata; 2200 tmf->my_cancel_key = (unsigned long)starget->hostdata;
2196 2201
@@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2327 timeout = wait_for_completion_timeout(&evt->comp, timeout); 2332 timeout = wait_for_completion_timeout(&evt->comp, timeout);
2328 2333
2329 if (!timeout) { 2334 if (!timeout) {
2330 rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); 2335 rc = ibmvfc_cancel_all(sdev, 0);
2331 if (!rc) { 2336 if (!rc) {
2332 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); 2337 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2333 if (rc == SUCCESS) 2338 if (rc == SUCCESS)
@@ -2383,24 +2388,30 @@ out:
2383 * @cmd: scsi command to abort 2388 * @cmd: scsi command to abort
2384 * 2389 *
2385 * Returns: 2390 * Returns:
2386 * SUCCESS / FAILED 2391 * SUCCESS / FAST_IO_FAIL / FAILED
2387 **/ 2392 **/
2388static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) 2393static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2389{ 2394{
2390 struct scsi_device *sdev = cmd->device; 2395 struct scsi_device *sdev = cmd->device;
2391 struct ibmvfc_host *vhost = shost_priv(sdev->host); 2396 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2392 int cancel_rc, abort_rc; 2397 int cancel_rc, block_rc;
2393 int rc = FAILED; 2398 int rc = FAILED;
2394 2399
2395 ENTER; 2400 ENTER;
2396 fc_block_scsi_eh(cmd); 2401 block_rc = fc_block_scsi_eh(cmd);
2397 ibmvfc_wait_while_resetting(vhost); 2402 ibmvfc_wait_while_resetting(vhost);
2398 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); 2403 if (block_rc != FAST_IO_FAIL) {
2399 abort_rc = ibmvfc_abort_task_set(sdev); 2404 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2405 ibmvfc_abort_task_set(sdev);
2406 } else
2407 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2400 2408
2401 if (!cancel_rc && !abort_rc) 2409 if (!cancel_rc)
2402 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); 2410 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2403 2411
2412 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2413 rc = FAST_IO_FAIL;
2414
2404 LEAVE; 2415 LEAVE;
2405 return rc; 2416 return rc;
2406} 2417}
@@ -2410,29 +2421,47 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2410 * @cmd: scsi command struct 2421 * @cmd: scsi command struct
2411 * 2422 *
2412 * Returns: 2423 * Returns:
2413 * SUCCESS / FAILED 2424 * SUCCESS / FAST_IO_FAIL / FAILED
2414 **/ 2425 **/
2415static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) 2426static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2416{ 2427{
2417 struct scsi_device *sdev = cmd->device; 2428 struct scsi_device *sdev = cmd->device;
2418 struct ibmvfc_host *vhost = shost_priv(sdev->host); 2429 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2419 int cancel_rc, reset_rc; 2430 int cancel_rc, block_rc, reset_rc = 0;
2420 int rc = FAILED; 2431 int rc = FAILED;
2421 2432
2422 ENTER; 2433 ENTER;
2423 fc_block_scsi_eh(cmd); 2434 block_rc = fc_block_scsi_eh(cmd);
2424 ibmvfc_wait_while_resetting(vhost); 2435 ibmvfc_wait_while_resetting(vhost);
2425 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); 2436 if (block_rc != FAST_IO_FAIL) {
2426 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); 2437 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2438 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2439 } else
2440 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2427 2441
2428 if (!cancel_rc && !reset_rc) 2442 if (!cancel_rc && !reset_rc)
2429 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); 2443 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2430 2444
2445 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2446 rc = FAST_IO_FAIL;
2447
2431 LEAVE; 2448 LEAVE;
2432 return rc; 2449 return rc;
2433} 2450}
2434 2451
2435/** 2452/**
2453 * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2454 * @sdev: scsi device struct
2455 * @data: return code
2456 *
2457 **/
2458static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2459{
2460 unsigned long *rc = data;
2461 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2462}
2463
2464/**
2436 * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function 2465 * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2437 * @sdev: scsi device struct 2466 * @sdev: scsi device struct
2438 * @data: return code 2467 * @data: return code
@@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2449 * @cmd: scsi command struct 2478 * @cmd: scsi command struct
2450 * 2479 *
2451 * Returns: 2480 * Returns:
2452 * SUCCESS / FAILED 2481 * SUCCESS / FAST_IO_FAIL / FAILED
2453 **/ 2482 **/
2454static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) 2483static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2455{ 2484{
2456 struct scsi_device *sdev = cmd->device; 2485 struct scsi_device *sdev = cmd->device;
2457 struct ibmvfc_host *vhost = shost_priv(sdev->host); 2486 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2458 struct scsi_target *starget = scsi_target(sdev); 2487 struct scsi_target *starget = scsi_target(sdev);
2459 int reset_rc; 2488 int block_rc;
2489 int reset_rc = 0;
2460 int rc = FAILED; 2490 int rc = FAILED;
2461 unsigned long cancel_rc = 0; 2491 unsigned long cancel_rc = 0;
2462 2492
2463 ENTER; 2493 ENTER;
2464 fc_block_scsi_eh(cmd); 2494 block_rc = fc_block_scsi_eh(cmd);
2465 ibmvfc_wait_while_resetting(vhost); 2495 ibmvfc_wait_while_resetting(vhost);
2466 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); 2496 if (block_rc != FAST_IO_FAIL) {
2467 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); 2497 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2498 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2499 } else
2500 starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2468 2501
2469 if (!cancel_rc && !reset_rc) 2502 if (!cancel_rc && !reset_rc)
2470 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); 2503 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2471 2504
2505 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2506 rc = FAST_IO_FAIL;
2507
2472 LEAVE; 2508 LEAVE;
2473 return rc; 2509 return rc;
2474} 2510}
@@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2480 **/ 2516 **/
2481static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) 2517static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2482{ 2518{
2483 int rc; 2519 int rc, block_rc;
2484 struct ibmvfc_host *vhost = shost_priv(cmd->device->host); 2520 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2485 2521
2486 fc_block_scsi_eh(cmd); 2522 block_rc = fc_block_scsi_eh(cmd);
2487 dev_err(vhost->dev, "Resetting connection due to error recovery\n"); 2523 dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2488 rc = ibmvfc_issue_fc_host_lip(vhost->host); 2524 rc = ibmvfc_issue_fc_host_lip(vhost->host);
2525
2526 if (block_rc == FAST_IO_FAIL)
2527 return FAST_IO_FAIL;
2528
2489 return rc ? FAILED : SUCCESS; 2529 return rc ? FAILED : SUCCESS;
2490} 2530}
2491 2531
@@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2509 dev_rport = starget_to_rport(scsi_target(sdev)); 2549 dev_rport = starget_to_rport(scsi_target(sdev));
2510 if (dev_rport != rport) 2550 if (dev_rport != rport)
2511 continue; 2551 continue;
2512 ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); 2552 ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2513 ibmvfc_abort_task_set(sdev);
2514 } 2553 }
2515 2554
2516 rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); 2555 rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3be8af624e6f..017a5290e8c1 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.10" 32#define IBMVFC_DRIVER_VERSION "1.0.11"
33#define IBMVFC_DRIVER_DATE "(August 24, 2012)" 33#define IBMVFC_DRIVER_DATE "(April 12, 2013)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp {
208 u16 error; 208 u16 error;
209 u32 flags; 209 u32 flags;
210#define IBMVFC_NATIVE_FC 0x01 210#define IBMVFC_NATIVE_FC 0x01
211#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
212 u32 reserved; 211 u32 reserved;
213 u64 capabilities; 212 u64 capabilities;
214#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 213#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
214#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
215 u32 max_cmds; 215 u32 max_cmds;
216 u32 scsi_id_sz; 216 u32 scsi_id_sz;
217 u64 max_dma_len; 217 u64 max_dma_len;
@@ -351,6 +351,7 @@ struct ibmvfc_tmf {
351#define IBMVFC_TMF_LUN_RESET 0x10 351#define IBMVFC_TMF_LUN_RESET 0x10
352#define IBMVFC_TMF_TGT_RESET 0x20 352#define IBMVFC_TMF_TGT_RESET 0x20
353#define IBMVFC_TMF_LUA_VALID 0x40 353#define IBMVFC_TMF_LUA_VALID 0x40
354#define IBMVFC_TMF_SUPPRESS_ABTS 0x80
354 u32 cancel_key; 355 u32 cancel_key;
355 u32 my_cancel_key; 356 u32 my_cancel_key;
356 u32 pad; 357 u32 pad;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2197b57fb225..82a3c1ec8706 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4777 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; 4777 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4778 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4779 4779
4780 if (!ioa_cfg->in_reset_reload) { 4780 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4781 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); 4781 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4782 dev_err(&ioa_cfg->pdev->dev, 4782 dev_err(&ioa_cfg->pdev->dev,
4783 "Adapter being reset as a result of error recovery.\n"); 4783 "Adapter being reset as a result of error recovery.\n");
@@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6421{ 6421{
6422 u32 ioadl_flags = 0; 6422 u32 ioadl_flags = 0;
6423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6423 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; 6424 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6425 struct ipr_ioadl64_desc *last_ioadl64 = NULL; 6425 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6426 int len = qc->nbytes; 6426 int len = qc->nbytes;
6427 struct scatterlist *sg; 6427 struct scatterlist *sg;
@@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6441 ioarcb->ioadl_len = 6441 ioarcb->ioadl_len =
6442 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); 6442 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6443 ioarcb->u.sis64_addr_data.data_ioadl_addr = 6443 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6444 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); 6444 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6445 6445
6446 for_each_sg(qc->sg, sg, qc->n_elem, si) { 6446 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6447 ioadl64->flags = cpu_to_be32(ioadl_flags); 6447 ioadl64->flags = cpu_to_be32(ioadl_flags);
@@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6739static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) 6739static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6740{ 6740{
6741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6741 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6742 int i;
6742 6743
6743 ENTER; 6744 ENTER;
6744 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 6745 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6750 6751
6751 ioa_cfg->in_reset_reload = 0; 6752 ioa_cfg->in_reset_reload = 0;
6752 ioa_cfg->reset_retries = 0; 6753 ioa_cfg->reset_retries = 0;
6754 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6755 spin_lock(&ioa_cfg->hrrq[i]._lock);
6756 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6757 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6758 }
6759 wmb();
6760
6753 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6761 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6754 wake_up_all(&ioa_cfg->reset_wait_q); 6762 wake_up_all(&ioa_cfg->reset_wait_q);
6755 LEAVE; 6763 LEAVE;
@@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
8651 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 8659 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8652 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) 8660 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8653 ioa_cfg->sdt_state = ABORT_DUMP; 8661 ioa_cfg->sdt_state = ABORT_DUMP;
8654 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; 8662 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8655 ioa_cfg->in_ioa_bringdown = 1; 8663 ioa_cfg->in_ioa_bringdown = 1;
8656 for (i = 0; i < ioa_cfg->hrrq_num; i++) { 8664 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8657 spin_lock(&ioa_cfg->hrrq[i]._lock); 8665 spin_lock(&ioa_cfg->hrrq[i]._lock);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 21a6ff1ed5c6..a1fb840596ef 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */
552 u8 hob_lbam; 552 u8 hob_lbam;
553 u8 hob_lbah; 553 u8 hob_lbah;
554 u8 ctl; 554 u8 ctl;
555}__attribute__ ((packed, aligned(4))); 555}__attribute__ ((packed, aligned(2)));
556 556
557struct ipr_ioadl_desc { 557struct ipr_ioadl_desc {
558 __be32 flags_and_data_len; 558 __be32 flags_and_data_len;
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index c3aa6c5457b9..96a26f454673 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
1085 struct isci_host *ihost = idev->owning_port->owning_controller; 1085 struct isci_host *ihost = idev->owning_port->owning_controller;
1086 struct domain_device *dev = idev->domain_dev; 1086 struct domain_device *dev = idev->domain_dev;
1087 1087
1088 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { 1088 if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
1089 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); 1089 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
1090 } else if (dev_is_expander(dev)) { 1090 } else if (dev_is_expander(dev)) {
1091 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); 1091 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
1098 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1098 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1099 struct domain_device *dev = idev->domain_dev; 1099 struct domain_device *dev = idev->domain_dev;
1100 1100
1101 if (dev->dev_type == SAS_END_DEV) { 1101 if (dev->dev_type == SAS_END_DEVICE) {
1102 struct isci_host *ihost = idev->owning_port->owning_controller; 1102 struct isci_host *ihost = idev->owning_port->owning_controller;
1103 1103
1104 isci_remote_device_not_ready(ihost, idev, 1104 isci_remote_device_not_ready(ihost, idev,
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 7674caae1d88..47a013fffae7 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
297 297
298static inline bool dev_is_expander(struct domain_device *dev) 298static inline bool dev_is_expander(struct domain_device *dev)
299{ 299{
300 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; 300 return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
301} 301}
302 302
303static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) 303static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 9594ab62702b..e3e3bcbd5a9f 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2978 /* all unaccelerated request types (non ssp or ncq) handled with 2978 /* all unaccelerated request types (non ssp or ncq) handled with
2979 * substates 2979 * substates
2980 */ 2980 */
2981 if (!task && dev->dev_type == SAS_END_DEV) { 2981 if (!task && dev->dev_type == SAS_END_DEVICE) {
2982 state = SCI_REQ_TASK_WAIT_TC_COMP; 2982 state = SCI_REQ_TASK_WAIT_TC_COMP;
2983 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 2983 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2984 state = SCI_REQ_SMP_WAIT_RESP; 2984 state = SCI_REQ_SMP_WAIT_RESP;
@@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
3101 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3101 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3102 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3102 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3103 3103
3104 if (dev->dev_type == SAS_END_DEV) 3104 if (dev->dev_type == SAS_END_DEVICE)
3105 /* pass */; 3105 /* pass */;
3106 else if (dev_is_sata(dev)) 3106 else if (dev_is_sata(dev))
3107 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3107 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
3125 /* Build the common part of the request */ 3125 /* Build the common part of the request */
3126 sci_general_request_construct(ihost, idev, ireq); 3126 sci_general_request_construct(ihost, idev, ireq);
3127 3127
3128 if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { 3128 if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
3129 set_bit(IREQ_TMF, &ireq->flags); 3129 set_bit(IREQ_TMF, &ireq->flags);
3130 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3130 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3131 3131
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index b6f19a1db780..9bb020ac089c 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
250 } 250 }
251 251
252 /* XXX convert to get this from task->tproto like other drivers */ 252 /* XXX convert to get this from task->tproto like other drivers */
253 if (dev->dev_type == SAS_END_DEV) { 253 if (dev->dev_type == SAS_END_DEVICE) {
254 isci_tmf->proto = SAS_PROTOCOL_SSP; 254 isci_tmf->proto = SAS_PROTOCOL_SSP;
255 status = sci_task_request_construct_ssp(ireq); 255 status = sci_task_request_construct_ssp(ireq);
256 if (status != SCI_SUCCESS) 256 if (status != SCI_SUCCESS)
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index bdb81cda8401..161c98efade9 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
285 if (phy->attached_tproto & SAS_PROTOCOL_STP) 285 if (phy->attached_tproto & SAS_PROTOCOL_STP)
286 dev->tproto = phy->attached_tproto; 286 dev->tproto = phy->attached_tproto;
287 if (phy->attached_sata_dev) 287 if (phy->attached_sata_dev)
288 dev->tproto |= SATA_DEV; 288 dev->tproto |= SAS_SATA_DEV;
289 289
290 if (phy->attached_dev_type == SATA_PENDING) 290 if (phy->attached_dev_type == SAS_SATA_PENDING)
291 dev->dev_type = SATA_PENDING; 291 dev->dev_type = SAS_SATA_PENDING;
292 else { 292 else {
293 int res; 293 int res;
294 294
295 dev->dev_type = SATA_DEV; 295 dev->dev_type = SAS_SATA_DEV;
296 res = sas_get_report_phy_sata(dev->parent, phy->phy_id, 296 res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
297 &dev->sata_dev.rps_resp); 297 &dev->sata_dev.rps_resp);
298 if (res) { 298 if (res) {
@@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
314 int res; 314 int res;
315 315
316 /* we weren't pending, so successfully end the reset sequence now */ 316 /* we weren't pending, so successfully end the reset sequence now */
317 if (dev->dev_type != SATA_PENDING) 317 if (dev->dev_type != SAS_SATA_PENDING)
318 return 1; 318 return 1;
319 319
320 /* hmmm, if this succeeds do we need to repost the domain_device to the 320 /* hmmm, if this succeeds do we need to repost the domain_device to the
@@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link)
348 return 0; 348 return 0;
349 349
350 switch (ex_phy->attached_dev_type) { 350 switch (ex_phy->attached_dev_type) {
351 case SATA_PENDING: 351 case SAS_SATA_PENDING:
352 return 0; 352 return 0;
353 case SAS_END_DEV: 353 case SAS_END_DEVICE:
354 if (ex_phy->attached_sata_dev) 354 if (ex_phy->attached_sata_dev)
355 return sas_ata_clear_pending(dev, ex_phy); 355 return sas_ata_clear_pending(dev, ex_phy);
356 default: 356 default:
@@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev)
631 struct dev_to_host_fis *fis = 631 struct dev_to_host_fis *fis =
632 (struct dev_to_host_fis *) dev->frame_rcvd; 632 (struct dev_to_host_fis *) dev->frame_rcvd;
633 633
634 if (dev->dev_type == SATA_PENDING) 634 if (dev->dev_type == SAS_SATA_PENDING)
635 return; 635 return;
636 636
637 if ((fis->sector_count == 1 && /* ATA */ 637 if ((fis->sector_count == 1 && /* ATA */
@@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev)
797{ 797{
798 int res; 798 int res;
799 799
800 if (dev->dev_type == SATA_PM) 800 if (dev->dev_type == SAS_SATA_PM)
801 return -ENODEV; 801 return -ENODEV;
802 802
803 sas_get_ata_command_set(dev); 803 sas_get_ata_command_set(dev);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a0c3003e0c7d..62b58d38ce2e 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -39,11 +39,11 @@
39void sas_init_dev(struct domain_device *dev) 39void sas_init_dev(struct domain_device *dev)
40{ 40{
41 switch (dev->dev_type) { 41 switch (dev->dev_type) {
42 case SAS_END_DEV: 42 case SAS_END_DEVICE:
43 INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); 43 INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
44 break; 44 break;
45 case EDGE_DEV: 45 case SAS_EDGE_EXPANDER_DEVICE:
46 case FANOUT_DEV: 46 case SAS_FANOUT_EXPANDER_DEVICE:
47 INIT_LIST_HEAD(&dev->ex_dev.children); 47 INIT_LIST_HEAD(&dev->ex_dev.children);
48 mutex_init(&dev->ex_dev.cmd_mutex); 48 mutex_init(&dev->ex_dev.cmd_mutex);
49 break; 49 break;
@@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port)
93 if (fis->interrupt_reason == 1 && fis->lbal == 1 && 93 if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
94 fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 94 fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
95 && (fis->device & ~0x10) == 0) 95 && (fis->device & ~0x10) == 0)
96 dev->dev_type = SATA_PM; 96 dev->dev_type = SAS_SATA_PM;
97 else 97 else
98 dev->dev_type = SATA_DEV; 98 dev->dev_type = SAS_SATA_DEV;
99 dev->tproto = SAS_PROTOCOL_SATA; 99 dev->tproto = SAS_PROTOCOL_SATA;
100 } else { 100 } else {
101 struct sas_identify_frame *id = 101 struct sas_identify_frame *id =
@@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
109 109
110 dev->port = port; 110 dev->port = port;
111 switch (dev->dev_type) { 111 switch (dev->dev_type) {
112 case SATA_DEV: 112 case SAS_SATA_DEV:
113 rc = sas_ata_init(dev); 113 rc = sas_ata_init(dev);
114 if (rc) { 114 if (rc) {
115 rphy = NULL; 115 rphy = NULL;
116 break; 116 break;
117 } 117 }
118 /* fall through */ 118 /* fall through */
119 case SAS_END_DEV: 119 case SAS_END_DEVICE:
120 rphy = sas_end_device_alloc(port->port); 120 rphy = sas_end_device_alloc(port->port);
121 break; 121 break;
122 case EDGE_DEV: 122 case SAS_EDGE_EXPANDER_DEVICE:
123 rphy = sas_expander_alloc(port->port, 123 rphy = sas_expander_alloc(port->port,
124 SAS_EDGE_EXPANDER_DEVICE); 124 SAS_EDGE_EXPANDER_DEVICE);
125 break; 125 break;
126 case FANOUT_DEV: 126 case SAS_FANOUT_EXPANDER_DEVICE:
127 rphy = sas_expander_alloc(port->port, 127 rphy = sas_expander_alloc(port->port,
128 SAS_FANOUT_EXPANDER_DEVICE); 128 SAS_FANOUT_EXPANDER_DEVICE);
129 break; 129 break;
@@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
156 dev->rphy = rphy; 156 dev->rphy = rphy;
157 get_device(&dev->rphy->dev); 157 get_device(&dev->rphy->dev);
158 158
159 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) 159 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
160 list_add_tail(&dev->disco_list_node, &port->disco_list); 160 list_add_tail(&dev->disco_list_node, &port->disco_list);
161 else { 161 else {
162 spin_lock_irq(&port->dev_list_lock); 162 spin_lock_irq(&port->dev_list_lock);
@@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref)
315 dev->phy = NULL; 315 dev->phy = NULL;
316 316
317 /* remove the phys and ports, everything else should be gone */ 317 /* remove the phys and ports, everything else should be gone */
318 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) 318 if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
319 kfree(dev->ex_dev.ex_phy); 319 kfree(dev->ex_dev.ex_phy);
320 320
321 if (dev_is_sata(dev) && dev->sata_dev.ap) { 321 if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
343 spin_unlock_irq(&port->dev_list_lock); 343 spin_unlock_irq(&port->dev_list_lock);
344 344
345 spin_lock_irq(&ha->lock); 345 spin_lock_irq(&ha->lock);
346 if (dev->dev_type == SAS_END_DEV && 346 if (dev->dev_type == SAS_END_DEVICE &&
347 !list_empty(&dev->ssp_dev.eh_list_node)) { 347 !list_empty(&dev->ssp_dev.eh_list_node)) {
348 list_del_init(&dev->ssp_dev.eh_list_node); 348 list_del_init(&dev->ssp_dev.eh_list_node);
349 ha->eh_active--; 349 ha->eh_active--;
@@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work)
457 task_pid_nr(current)); 457 task_pid_nr(current));
458 458
459 switch (dev->dev_type) { 459 switch (dev->dev_type) {
460 case SAS_END_DEV: 460 case SAS_END_DEVICE:
461 error = sas_discover_end_dev(dev); 461 error = sas_discover_end_dev(dev);
462 break; 462 break;
463 case EDGE_DEV: 463 case SAS_EDGE_EXPANDER_DEVICE:
464 case FANOUT_DEV: 464 case SAS_FANOUT_EXPANDER_DEVICE:
465 error = sas_discover_root_expander(dev); 465 error = sas_discover_root_expander(dev);
466 break; 466 break;
467 case SATA_DEV: 467 case SAS_SATA_DEV:
468 case SATA_PM: 468 case SAS_SATA_PM:
469#ifdef CONFIG_SCSI_SAS_ATA 469#ifdef CONFIG_SCSI_SAS_ATA
470 error = sas_discover_sata(dev); 470 error = sas_discover_sata(dev);
471 break; 471 break;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f42b0e15410f..446b85110a1f 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
183 } 183 }
184} 184}
185 185
186static enum sas_dev_type to_dev_type(struct discover_resp *dr) 186static enum sas_device_type to_dev_type(struct discover_resp *dr)
187{ 187{
188 /* This is detecting a failure to transmit initial dev to host 188 /* This is detecting a failure to transmit initial dev to host
189 * FIS as described in section J.5 of sas-2 r16 189 * FIS as described in section J.5 of sas-2 r16
190 */ 190 */
191 if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && 191 if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
192 dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) 192 dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
193 return SATA_PENDING; 193 return SAS_SATA_PENDING;
194 else 194 else
195 return dr->attached_dev_type; 195 return dr->attached_dev_type;
196} 196}
197 197
198static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) 198static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
199{ 199{
200 enum sas_dev_type dev_type; 200 enum sas_device_type dev_type;
201 enum sas_linkrate linkrate; 201 enum sas_linkrate linkrate;
202 u8 sas_addr[SAS_ADDR_SIZE]; 202 u8 sas_addr[SAS_ADDR_SIZE];
203 struct smp_resp *resp = rsp; 203 struct smp_resp *resp = rsp;
@@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
238 /* Handle vacant phy - rest of dr data is not valid so skip it */ 238 /* Handle vacant phy - rest of dr data is not valid so skip it */
239 if (phy->phy_state == PHY_VACANT) { 239 if (phy->phy_state == PHY_VACANT) {
240 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 240 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
241 phy->attached_dev_type = NO_DEVICE; 241 phy->attached_dev_type = SAS_PHY_UNUSED;
242 if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { 242 if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
243 phy->phy_id = phy_id; 243 phy->phy_id = phy_id;
244 goto skip; 244 goto skip;
@@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
259 /* help some expanders that fail to zero sas_address in the 'no 259 /* help some expanders that fail to zero sas_address in the 'no
260 * device' case 260 * device' case
261 */ 261 */
262 if (phy->attached_dev_type == NO_DEVICE || 262 if (phy->attached_dev_type == SAS_PHY_UNUSED ||
263 phy->linkrate < SAS_LINK_RATE_1_5_GBPS) 263 phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
264 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 264 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
265 else 265 else
@@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
292 292
293 out: 293 out:
294 switch (phy->attached_dev_type) { 294 switch (phy->attached_dev_type) {
295 case SATA_PENDING: 295 case SAS_SATA_PENDING:
296 type = "stp pending"; 296 type = "stp pending";
297 break; 297 break;
298 case NO_DEVICE: 298 case SAS_PHY_UNUSED:
299 type = "no device"; 299 type = "no device";
300 break; 300 break;
301 case SAS_END_DEV: 301 case SAS_END_DEVICE:
302 if (phy->attached_iproto) { 302 if (phy->attached_iproto) {
303 if (phy->attached_tproto) 303 if (phy->attached_tproto)
304 type = "host+target"; 304 type = "host+target";
@@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
311 type = "ssp"; 311 type = "ssp";
312 } 312 }
313 break; 313 break;
314 case EDGE_DEV: 314 case SAS_EDGE_EXPANDER_DEVICE:
315 case FANOUT_DEV: 315 case SAS_FANOUT_EXPANDER_DEVICE:
316 type = "smp"; 316 type = "smp";
317 break; 317 break;
318 default: 318 default:
@@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev(
833 } else 833 } else
834#endif 834#endif
835 if (phy->attached_tproto & SAS_PROTOCOL_SSP) { 835 if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
836 child->dev_type = SAS_END_DEV; 836 child->dev_type = SAS_END_DEVICE;
837 rphy = sas_end_device_alloc(phy->port); 837 rphy = sas_end_device_alloc(phy->port);
838 /* FIXME: error handling */ 838 /* FIXME: error handling */
839 if (unlikely(!rphy)) 839 if (unlikely(!rphy))
@@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander(
932 932
933 933
934 switch (phy->attached_dev_type) { 934 switch (phy->attached_dev_type) {
935 case EDGE_DEV: 935 case SAS_EDGE_EXPANDER_DEVICE:
936 rphy = sas_expander_alloc(phy->port, 936 rphy = sas_expander_alloc(phy->port,
937 SAS_EDGE_EXPANDER_DEVICE); 937 SAS_EDGE_EXPANDER_DEVICE);
938 break; 938 break;
939 case FANOUT_DEV: 939 case SAS_FANOUT_EXPANDER_DEVICE:
940 rphy = sas_expander_alloc(phy->port, 940 rphy = sas_expander_alloc(phy->port,
941 SAS_FANOUT_EXPANDER_DEVICE); 941 SAS_FANOUT_EXPANDER_DEVICE);
942 break; 942 break;
@@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1013 if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) 1013 if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
1014 sas_ex_disable_port(dev, ex_phy->attached_sas_addr); 1014 sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
1015 1015
1016 if (ex_phy->attached_dev_type == NO_DEVICE) { 1016 if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
1017 if (ex_phy->routing_attr == DIRECT_ROUTING) { 1017 if (ex_phy->routing_attr == DIRECT_ROUTING) {
1018 memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1018 memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1019 sas_configure_routing(dev, ex_phy->attached_sas_addr); 1019 sas_configure_routing(dev, ex_phy->attached_sas_addr);
@@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1022 } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) 1022 } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
1023 return 0; 1023 return 0;
1024 1024
1025 if (ex_phy->attached_dev_type != SAS_END_DEV && 1025 if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
1026 ex_phy->attached_dev_type != FANOUT_DEV && 1026 ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
1027 ex_phy->attached_dev_type != EDGE_DEV && 1027 ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
1028 ex_phy->attached_dev_type != SATA_PENDING) { 1028 ex_phy->attached_dev_type != SAS_SATA_PENDING) {
1029 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " 1029 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
1030 "phy 0x%x\n", ex_phy->attached_dev_type, 1030 "phy 0x%x\n", ex_phy->attached_dev_type,
1031 SAS_ADDR(dev->sas_addr), 1031 SAS_ADDR(dev->sas_addr),
@@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1049 } 1049 }
1050 1050
1051 switch (ex_phy->attached_dev_type) { 1051 switch (ex_phy->attached_dev_type) {
1052 case SAS_END_DEV: 1052 case SAS_END_DEVICE:
1053 case SATA_PENDING: 1053 case SAS_SATA_PENDING:
1054 child = sas_ex_discover_end_dev(dev, phy_id); 1054 child = sas_ex_discover_end_dev(dev, phy_id);
1055 break; 1055 break;
1056 case FANOUT_DEV: 1056 case SAS_FANOUT_EXPANDER_DEVICE:
1057 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { 1057 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
1058 SAS_DPRINTK("second fanout expander %016llx phy 0x%x " 1058 SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
1059 "attached to ex %016llx phy 0x%x\n", 1059 "attached to ex %016llx phy 0x%x\n",
@@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1067 memcpy(dev->port->disc.fanout_sas_addr, 1067 memcpy(dev->port->disc.fanout_sas_addr,
1068 ex_phy->attached_sas_addr, SAS_ADDR_SIZE); 1068 ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
1069 /* fallthrough */ 1069 /* fallthrough */
1070 case EDGE_DEV: 1070 case SAS_EDGE_EXPANDER_DEVICE:
1071 child = sas_ex_discover_expander(dev, phy_id); 1071 child = sas_ex_discover_expander(dev, phy_id);
1072 break; 1072 break;
1073 default: 1073 default:
@@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
1111 phy->phy_state == PHY_NOT_PRESENT) 1111 phy->phy_state == PHY_NOT_PRESENT)
1112 continue; 1112 continue;
1113 1113
1114 if ((phy->attached_dev_type == EDGE_DEV || 1114 if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1115 phy->attached_dev_type == FANOUT_DEV) && 1115 phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
1116 phy->routing_attr == SUBTRACTIVE_ROUTING) { 1116 phy->routing_attr == SUBTRACTIVE_ROUTING) {
1117 1117
1118 memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); 1118 memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
1130 u8 sub_addr[8] = {0, }; 1130 u8 sub_addr[8] = {0, };
1131 1131
1132 list_for_each_entry(child, &ex->children, siblings) { 1132 list_for_each_entry(child, &ex->children, siblings) {
1133 if (child->dev_type != EDGE_DEV && 1133 if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
1134 child->dev_type != FANOUT_DEV) 1134 child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
1135 continue; 1135 continue;
1136 if (sub_addr[0] == 0) { 1136 if (sub_addr[0] == 0) {
1137 sas_find_sub_addr(child, sub_addr); 1137 sas_find_sub_addr(child, sub_addr);
@@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
1208 int i; 1208 int i;
1209 u8 *sub_sas_addr = NULL; 1209 u8 *sub_sas_addr = NULL;
1210 1210
1211 if (dev->dev_type != EDGE_DEV) 1211 if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
1212 return 0; 1212 return 0;
1213 1213
1214 for (i = 0; i < ex->num_phys; i++) { 1214 for (i = 0; i < ex->num_phys; i++) {
@@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
1218 phy->phy_state == PHY_NOT_PRESENT) 1218 phy->phy_state == PHY_NOT_PRESENT)
1219 continue; 1219 continue;
1220 1220
1221 if ((phy->attached_dev_type == FANOUT_DEV || 1221 if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
1222 phy->attached_dev_type == EDGE_DEV) && 1222 phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
1223 phy->routing_attr == SUBTRACTIVE_ROUTING) { 1223 phy->routing_attr == SUBTRACTIVE_ROUTING) {
1224 1224
1225 if (!sub_sas_addr) 1225 if (!sub_sas_addr)
@@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
1245 struct ex_phy *child_phy) 1245 struct ex_phy *child_phy)
1246{ 1246{
1247 static const char *ex_type[] = { 1247 static const char *ex_type[] = {
1248 [EDGE_DEV] = "edge", 1248 [SAS_EDGE_EXPANDER_DEVICE] = "edge",
1249 [FANOUT_DEV] = "fanout", 1249 [SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
1250 }; 1250 };
1251 struct domain_device *parent = child->parent; 1251 struct domain_device *parent = child->parent;
1252 1252
@@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child)
1321 if (!child->parent) 1321 if (!child->parent)
1322 return 0; 1322 return 0;
1323 1323
1324 if (child->parent->dev_type != EDGE_DEV && 1324 if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
1325 child->parent->dev_type != FANOUT_DEV) 1325 child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
1326 return 0; 1326 return 0;
1327 1327
1328 parent_ex = &child->parent->ex_dev; 1328 parent_ex = &child->parent->ex_dev;
@@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child)
1341 child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; 1341 child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
1342 1342
1343 switch (child->parent->dev_type) { 1343 switch (child->parent->dev_type) {
1344 case EDGE_DEV: 1344 case SAS_EDGE_EXPANDER_DEVICE:
1345 if (child->dev_type == FANOUT_DEV) { 1345 if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1346 if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || 1346 if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
1347 child_phy->routing_attr != TABLE_ROUTING) { 1347 child_phy->routing_attr != TABLE_ROUTING) {
1348 sas_print_parent_topology_bug(child, parent_phy, child_phy); 1348 sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child)
1366 } 1366 }
1367 } 1367 }
1368 break; 1368 break;
1369 case FANOUT_DEV: 1369 case SAS_FANOUT_EXPANDER_DEVICE:
1370 if (parent_phy->routing_attr != TABLE_ROUTING || 1370 if (parent_phy->routing_attr != TABLE_ROUTING ||
1371 child_phy->routing_attr != SUBTRACTIVE_ROUTING) { 1371 child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
1372 sas_print_parent_topology_bug(child, parent_phy, child_phy); 1372 sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
1619 struct domain_device *dev; 1619 struct domain_device *dev;
1620 1620
1621 list_for_each_entry(dev, &port->dev_list, dev_list_node) { 1621 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
1622 if (dev->dev_type == EDGE_DEV || 1622 if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1623 dev->dev_type == FANOUT_DEV) { 1623 dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1624 struct sas_expander_device *ex = 1624 struct sas_expander_device *ex =
1625 rphy_to_expander_device(dev->rphy); 1625 rphy_to_expander_device(dev->rphy);
1626 1626
@@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
1720} 1720}
1721 1721
1722static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, 1722static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
1723 u8 *sas_addr, enum sas_dev_type *type) 1723 u8 *sas_addr, enum sas_device_type *type)
1724{ 1724{
1725 int res; 1725 int res;
1726 struct smp_resp *disc_resp; 1726 struct smp_resp *disc_resp;
@@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
1849 SAS_DPRINTK("Expander phys DID NOT change\n"); 1849 SAS_DPRINTK("Expander phys DID NOT change\n");
1850 } 1850 }
1851 list_for_each_entry(ch, &ex->children, siblings) { 1851 list_for_each_entry(ch, &ex->children, siblings) {
1852 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { 1852 if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1853 res = sas_find_bcast_dev(ch, src_dev); 1853 res = sas_find_bcast_dev(ch, src_dev);
1854 if (*src_dev) 1854 if (*src_dev)
1855 return res; 1855 return res;
@@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
1866 1866
1867 list_for_each_entry_safe(child, n, &ex->children, siblings) { 1867 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1868 set_bit(SAS_DEV_GONE, &child->state); 1868 set_bit(SAS_DEV_GONE, &child->state);
1869 if (child->dev_type == EDGE_DEV || 1869 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1870 child->dev_type == FANOUT_DEV) 1870 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
1871 sas_unregister_ex_tree(port, child); 1871 sas_unregister_ex_tree(port, child);
1872 else 1872 else
1873 sas_unregister_dev(port, child); 1873 sas_unregister_dev(port, child);
@@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1887 if (SAS_ADDR(child->sas_addr) == 1887 if (SAS_ADDR(child->sas_addr) ==
1888 SAS_ADDR(phy->attached_sas_addr)) { 1888 SAS_ADDR(phy->attached_sas_addr)) {
1889 set_bit(SAS_DEV_GONE, &child->state); 1889 set_bit(SAS_DEV_GONE, &child->state);
1890 if (child->dev_type == EDGE_DEV || 1890 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1891 child->dev_type == FANOUT_DEV) 1891 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
1892 sas_unregister_ex_tree(parent->port, child); 1892 sas_unregister_ex_tree(parent->port, child);
1893 else 1893 else
1894 sas_unregister_dev(parent->port, child); 1894 sas_unregister_dev(parent->port, child);
@@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
1916 int res = 0; 1916 int res = 0;
1917 1917
1918 list_for_each_entry(child, &ex_root->children, siblings) { 1918 list_for_each_entry(child, &ex_root->children, siblings) {
1919 if (child->dev_type == EDGE_DEV || 1919 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1920 child->dev_type == FANOUT_DEV) { 1920 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
1921 struct sas_expander_device *ex = 1921 struct sas_expander_device *ex =
1922 rphy_to_expander_device(child->rphy); 1922 rphy_to_expander_device(child->rphy);
1923 1923
@@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1970 list_for_each_entry(child, &dev->ex_dev.children, siblings) { 1970 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1971 if (SAS_ADDR(child->sas_addr) == 1971 if (SAS_ADDR(child->sas_addr) ==
1972 SAS_ADDR(ex_phy->attached_sas_addr)) { 1972 SAS_ADDR(ex_phy->attached_sas_addr)) {
1973 if (child->dev_type == EDGE_DEV || 1973 if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
1974 child->dev_type == FANOUT_DEV) 1974 child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
1975 res = sas_discover_bfs_by_root(child); 1975 res = sas_discover_bfs_by_root(child);
1976 break; 1976 break;
1977 } 1977 }
@@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1979 return res; 1979 return res;
1980} 1980}
1981 1981
1982static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) 1982static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
1983{ 1983{
1984 if (old == new) 1984 if (old == new)
1985 return true; 1985 return true;
1986 1986
1987 /* treat device directed resets as flutter, if we went 1987 /* treat device directed resets as flutter, if we went
1988 * SAS_END_DEV to SATA_PENDING the link needs recovery 1988 * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
1989 */ 1989 */
1990 if ((old == SATA_PENDING && new == SAS_END_DEV) || 1990 if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
1991 (old == SAS_END_DEV && new == SATA_PENDING)) 1991 (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
1992 return true; 1992 return true;
1993 1993
1994 return false; 1994 return false;
@@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
1998{ 1998{
1999 struct expander_device *ex = &dev->ex_dev; 1999 struct expander_device *ex = &dev->ex_dev;
2000 struct ex_phy *phy = &ex->ex_phy[phy_id]; 2000 struct ex_phy *phy = &ex->ex_phy[phy_id];
2001 enum sas_dev_type type = NO_DEVICE; 2001 enum sas_device_type type = SAS_PHY_UNUSED;
2002 u8 sas_addr[8]; 2002 u8 sas_addr[8];
2003 int res; 2003 int res;
2004 2004
@@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
2032 2032
2033 sas_ex_phy_discover(dev, phy_id); 2033 sas_ex_phy_discover(dev, phy_id);
2034 2034
2035 if (ata_dev && phy->attached_dev_type == SATA_PENDING) 2035 if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
2036 action = ", needs recovery"; 2036 action = ", needs recovery";
2037 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", 2037 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
2038 SAS_ADDR(dev->sas_addr), phy_id, action); 2038 SAS_ADDR(dev->sas_addr), phy_id, action);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 1de67964e5a1..7e7ba83f0a21 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
131 rphy->identify.initiator_port_protocols = dev->iproto; 131 rphy->identify.initiator_port_protocols = dev->iproto;
132 rphy->identify.target_port_protocols = dev->tproto; 132 rphy->identify.target_port_protocols = dev->tproto;
133 switch (dev->dev_type) { 133 switch (dev->dev_type) {
134 case SATA_DEV: 134 case SAS_SATA_DEV:
135 /* FIXME: need sata device type */ 135 /* FIXME: need sata device type */
136 case SAS_END_DEV: 136 case SAS_END_DEVICE:
137 case SATA_PENDING: 137 case SAS_SATA_PENDING:
138 rphy->identify.device_type = SAS_END_DEVICE; 138 rphy->identify.device_type = SAS_END_DEVICE;
139 break; 139 break;
140 case EDGE_DEV: 140 case SAS_EDGE_EXPANDER_DEVICE:
141 rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; 141 rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
142 break; 142 break;
143 case FANOUT_DEV: 143 case SAS_FANOUT_EXPANDER_DEVICE:
144 rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; 144 rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
145 break; 145 break;
146 default: 146 default:
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 1398b714c018..d3c5297c6c89 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
69 continue; 69 continue;
70 } 70 }
71 71
72 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { 72 if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
73 dev->ex_dev.ex_change_count = -1; 73 dev->ex_dev.ex_change_count = -1;
74 for (i = 0; i < dev->ex_dev.num_phys; i++) { 74 for (i = 0; i < dev->ex_dev.num_phys; i++) {
75 struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; 75 struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 7706c99ec8bb..bcc56cac4fd8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
46#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi 46#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
47 cmnd for menlo needs nearly twice as for firmware 47 cmnd for menlo needs nearly twice as for firmware
48 downloads using bsg */ 48 downloads using bsg */
49#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ 49
50#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
51#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
52#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
50#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ 53#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
54#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
55#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
56
51#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ 57#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
52#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
53#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 58#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
54#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 59#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
55#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ 60#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
@@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
66 * queue depths when there are driver resource error or Firmware 71 * queue depths when there are driver resource error or Firmware
67 * resource error. 72 * resource error.
68 */ 73 */
69#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ 74/* 1 Second */
70#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ 75#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
76/* 5 minutes */
77#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300))
71 78
72/* Number of exchanges reserved for discovery to complete */ 79/* Number of exchanges reserved for discovery to complete */
73#define LPFC_DISC_IOCB_BUFF_COUNT 20 80#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -671,6 +678,7 @@ struct lpfc_hba {
671 uint32_t lmt; 678 uint32_t lmt;
672 679
673 uint32_t fc_topology; /* link topology, from LINK INIT */ 680 uint32_t fc_topology; /* link topology, from LINK INIT */
681 uint32_t fc_topology_changed; /* link topology, from LINK INIT */
674 682
675 struct lpfc_stats fc_stat; 683 struct lpfc_stats fc_stat;
676 684
@@ -701,9 +709,11 @@ struct lpfc_hba {
701 uint32_t cfg_poll_tmo; 709 uint32_t cfg_poll_tmo;
702 uint32_t cfg_use_msi; 710 uint32_t cfg_use_msi;
703 uint32_t cfg_fcp_imax; 711 uint32_t cfg_fcp_imax;
712 uint32_t cfg_fcp_cpu_map;
704 uint32_t cfg_fcp_wq_count; 713 uint32_t cfg_fcp_wq_count;
705 uint32_t cfg_fcp_eq_count; 714 uint32_t cfg_fcp_eq_count;
706 uint32_t cfg_fcp_io_channel; 715 uint32_t cfg_fcp_io_channel;
716 uint32_t cfg_total_seg_cnt;
707 uint32_t cfg_sg_seg_cnt; 717 uint32_t cfg_sg_seg_cnt;
708 uint32_t cfg_prot_sg_seg_cnt; 718 uint32_t cfg_prot_sg_seg_cnt;
709 uint32_t cfg_sg_dma_buf_size; 719 uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@ struct lpfc_hba {
804 uint64_t bg_reftag_err_cnt; 814 uint64_t bg_reftag_err_cnt;
805 815
806 /* fastpath list. */ 816 /* fastpath list. */
807 spinlock_t scsi_buf_list_lock; 817 spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */
808 struct list_head lpfc_scsi_buf_list; 818 spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */
819 struct list_head lpfc_scsi_buf_list_get;
820 struct list_head lpfc_scsi_buf_list_put;
809 uint32_t total_scsi_bufs; 821 uint32_t total_scsi_bufs;
810 struct list_head lpfc_iocb_list; 822 struct list_head lpfc_iocb_list;
811 uint32_t total_iocbq_bufs; 823 uint32_t total_iocbq_bufs;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 9290713af253..3c5625b8b1f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
674 int i; 674 int i;
675 int rc; 675 int rc;
676 676
677 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
678 return 0;
679
677 init_completion(&online_compl); 680 init_completion(&online_compl);
678 rc = lpfc_workq_post_event(phba, &status, &online_compl, 681 rc = lpfc_workq_post_event(phba, &status, &online_compl,
679 LPFC_EVT_OFFLINE_PREP); 682 LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
741 int status = 0; 744 int status = 0;
742 int rc; 745 int rc;
743 746
744 if (!phba->cfg_enable_hba_reset) 747 if ((!phba->cfg_enable_hba_reset) ||
748 (phba->pport->fc_flag & FC_OFFLINE_MODE))
745 return -EACCES; 749 return -EACCES;
746 750
747 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 751 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
895 pci_disable_sriov(pdev); 899 pci_disable_sriov(pdev);
896 phba->cfg_sriov_nr_virtfn = 0; 900 phba->cfg_sriov_nr_virtfn = 0;
897 } 901 }
902
898 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 903 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
899 904
900 if (status != 0) 905 if (status != 0)
@@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
2801 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 2806 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2802 "3054 lpfc_topology changed from %d to %d\n", 2807 "3054 lpfc_topology changed from %d to %d\n",
2803 prev_val, val); 2808 prev_val, val);
2809 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
2810 phba->fc_topology_changed = 1;
2804 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 2811 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
2805 if (err) { 2812 if (err) {
2806 phba->cfg_topology = prev_val; 2813 phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
3792static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, 3799static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
3793 lpfc_fcp_imax_show, lpfc_fcp_imax_store); 3800 lpfc_fcp_imax_show, lpfc_fcp_imax_store);
3794 3801
3802/**
3803 * lpfc_state_show - Display current driver CPU affinity
3804 * @dev: class converted to a Scsi_host structure.
3805 * @attr: device attribute, not used.
3806 * @buf: on return contains text describing the state of the link.
3807 *
3808 * Returns: size of formatted string.
3809 **/
3810static ssize_t
3811lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
3812 char *buf)
3813{
3814 struct Scsi_Host *shost = class_to_shost(dev);
3815 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
3816 struct lpfc_hba *phba = vport->phba;
3817 struct lpfc_vector_map_info *cpup;
3818 int idx, len = 0;
3819
3820 if ((phba->sli_rev != LPFC_SLI_REV4) ||
3821 (phba->intr_type != MSIX))
3822 return len;
3823
3824 switch (phba->cfg_fcp_cpu_map) {
3825 case 0:
3826 len += snprintf(buf + len, PAGE_SIZE-len,
3827 "fcp_cpu_map: No mapping (%d)\n",
3828 phba->cfg_fcp_cpu_map);
3829 return len;
3830 case 1:
3831 len += snprintf(buf + len, PAGE_SIZE-len,
3832 "fcp_cpu_map: HBA centric mapping (%d): "
3833 "%d online CPUs\n",
3834 phba->cfg_fcp_cpu_map,
3835 phba->sli4_hba.num_online_cpu);
3836 break;
3837 case 2:
3838 len += snprintf(buf + len, PAGE_SIZE-len,
3839 "fcp_cpu_map: Driver centric mapping (%d): "
3840 "%d online CPUs\n",
3841 phba->cfg_fcp_cpu_map,
3842 phba->sli4_hba.num_online_cpu);
3843 break;
3844 }
3845
3846 cpup = phba->sli4_hba.cpu_map;
3847 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
3848 if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
3849 len += snprintf(buf + len, PAGE_SIZE-len,
3850 "CPU %02d io_chan %02d "
3851 "physid %d coreid %d\n",
3852 idx, cpup->channel_id, cpup->phys_id,
3853 cpup->core_id);
3854 else
3855 len += snprintf(buf + len, PAGE_SIZE-len,
3856 "CPU %02d io_chan %02d "
3857 "physid %d coreid %d IRQ %d\n",
3858 idx, cpup->channel_id, cpup->phys_id,
3859 cpup->core_id, cpup->irq);
3860
3861 cpup++;
3862 }
3863 return len;
3864}
3865
3866/**
3867 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
3868 * @dev: class device that is converted into a Scsi_host.
3869 * @attr: device attribute, not used.
3870 * @buf: one or more lpfc_polling_flags values.
3871 * @count: not used.
3872 *
3873 * Returns:
3874 * -EINVAL - Not implemented yet.
3875 **/
3876static ssize_t
3877lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
3878 const char *buf, size_t count)
3879{
3880 int status = -EINVAL;
3881 return status;
3882}
3883
3884/*
3885# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
3886# for the HBA.
3887#
3888# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
3889# 0 - Do not affinitze IRQ vectors
3890# 1 - Affintize HBA vectors with respect to each HBA
3891# (start with CPU0 for each HBA)
3892# 2 - Affintize HBA vectors with respect to the entire driver
3893# (round robin thru all CPUs across all HBAs)
3894*/
3895static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
3896module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
3897MODULE_PARM_DESC(lpfc_fcp_cpu_map,
3898 "Defines how to map CPUs to IRQ vectors per HBA");
3899
3900/**
3901 * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable
3902 * @phba: lpfc_hba pointer.
3903 * @val: link speed value.
3904 *
3905 * Description:
3906 * If val is in a valid range [0-2], then affinitze the adapter's
3907 * MSIX vectors.
3908 *
3909 * Returns:
3910 * zero if val saved.
3911 * -EINVAL val out of range
3912 **/
3913static int
3914lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
3915{
3916 if (phba->sli_rev != LPFC_SLI_REV4) {
3917 phba->cfg_fcp_cpu_map = 0;
3918 return 0;
3919 }
3920
3921 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
3922 phba->cfg_fcp_cpu_map = val;
3923 return 0;
3924 }
3925
3926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3927 "3326 fcp_cpu_map: %d out of range, using default\n",
3928 val);
3929 phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
3930
3931 return 0;
3932}
3933
3934static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
3935 lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
3936
3795/* 3937/*
3796# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 3938# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
3797# Value range is [2,3]. Default value is 3. 3939# Value range is [2,3]. Default value is 3.
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
4009# 0 = disabled (default) 4151# 0 = disabled (default)
4010# 1 = enabled 4152# 1 = enabled
4011# Value range is [0,1]. Default value is 0. 4153# Value range is [0,1]. Default value is 0.
4154#
4155# This feature in under investigation and may be supported in the future.
4012*/ 4156*/
4013unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; 4157unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
4014 4158
4015module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
4016MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
4017
4018/* 4159/*
4019# lpfc_prot_mask: i 4160# lpfc_prot_mask: i
4020# - Bit mask of host protection capabilities used to register with the 4161# - Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
4071 4212
4072/* 4213/*
4073 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count 4214 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
4074 * This value can be set to values between 64 and 256. The default value is 4215 * This value can be set to values between 64 and 4096. The default value is
4075 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer 4216 * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
4076 * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). 4217 * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
4218 * Because of the additional overhead involved in setting up T10-DIF,
4219 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
4220 * and will be limited to 512 if BlockGuard is enabled under SLI3.
4077 */ 4221 */
4078LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, 4222LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
4079 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); 4223 LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
4080 4224
4081LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, 4225/*
4082 LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, 4226 * This parameter will be depricated, the driver cannot limit the
4083 "Max Protection Scatter Gather Segment Count"); 4227 * protection data s/g list.
4228 */
4229LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
4230 LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
4231 "Max Protection Scatter Gather Segment Count");
4084 4232
4085struct device_attribute *lpfc_hba_attrs[] = { 4233struct device_attribute *lpfc_hba_attrs[] = {
4086 &dev_attr_bg_info, 4234 &dev_attr_bg_info,
@@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4141 &dev_attr_lpfc_poll_tmo, 4289 &dev_attr_lpfc_poll_tmo,
4142 &dev_attr_lpfc_use_msi, 4290 &dev_attr_lpfc_use_msi,
4143 &dev_attr_lpfc_fcp_imax, 4291 &dev_attr_lpfc_fcp_imax,
4292 &dev_attr_lpfc_fcp_cpu_map,
4144 &dev_attr_lpfc_fcp_wq_count, 4293 &dev_attr_lpfc_fcp_wq_count,
4145 &dev_attr_lpfc_fcp_eq_count, 4294 &dev_attr_lpfc_fcp_eq_count,
4146 &dev_attr_lpfc_fcp_io_channel, 4295 &dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
5123 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 5272 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
5124 lpfc_use_msi_init(phba, lpfc_use_msi); 5273 lpfc_use_msi_init(phba, lpfc_use_msi);
5125 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 5274 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
5275 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
5126 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); 5276 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
5127 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); 5277 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
5128 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); 5278 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 888666892004..094be2cad65b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
219 unsigned int transfer_bytes, bytes_copied = 0; 219 unsigned int transfer_bytes, bytes_copied = 0;
220 unsigned int sg_offset, dma_offset; 220 unsigned int sg_offset, dma_offset;
221 unsigned char *dma_address, *sg_address; 221 unsigned char *dma_address, *sg_address;
222 struct scatterlist *sgel;
223 LIST_HEAD(temp_list); 222 LIST_HEAD(temp_list);
224 223 struct sg_mapping_iter miter;
224 unsigned long flags;
225 unsigned int sg_flags = SG_MITER_ATOMIC;
226 bool sg_valid;
225 227
226 list_splice_init(&dma_buffers->list, &temp_list); 228 list_splice_init(&dma_buffers->list, &temp_list);
227 list_add(&dma_buffers->list, &temp_list); 229 list_add(&dma_buffers->list, &temp_list);
228 sg_offset = 0; 230 sg_offset = 0;
229 sgel = bsg_buffers->sg_list; 231 if (to_buffers)
232 sg_flags |= SG_MITER_FROM_SG;
233 else
234 sg_flags |= SG_MITER_TO_SG;
235 sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
236 sg_flags);
237 local_irq_save(flags);
238 sg_valid = sg_miter_next(&miter);
230 list_for_each_entry(mp, &temp_list, list) { 239 list_for_each_entry(mp, &temp_list, list) {
231 dma_offset = 0; 240 dma_offset = 0;
232 while (bytes_to_transfer && sgel && 241 while (bytes_to_transfer && sg_valid &&
233 (dma_offset < LPFC_BPL_SIZE)) { 242 (dma_offset < LPFC_BPL_SIZE)) {
234 dma_address = mp->virt + dma_offset; 243 dma_address = mp->virt + dma_offset;
235 if (sg_offset) { 244 if (sg_offset) {
236 /* Continue previous partial transfer of sg */ 245 /* Continue previous partial transfer of sg */
237 sg_address = sg_virt(sgel) + sg_offset; 246 sg_address = miter.addr + sg_offset;
238 transfer_bytes = sgel->length - sg_offset; 247 transfer_bytes = miter.length - sg_offset;
239 } else { 248 } else {
240 sg_address = sg_virt(sgel); 249 sg_address = miter.addr;
241 transfer_bytes = sgel->length; 250 transfer_bytes = miter.length;
242 } 251 }
243 if (bytes_to_transfer < transfer_bytes) 252 if (bytes_to_transfer < transfer_bytes)
244 transfer_bytes = bytes_to_transfer; 253 transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
252 sg_offset += transfer_bytes; 261 sg_offset += transfer_bytes;
253 bytes_to_transfer -= transfer_bytes; 262 bytes_to_transfer -= transfer_bytes;
254 bytes_copied += transfer_bytes; 263 bytes_copied += transfer_bytes;
255 if (sg_offset >= sgel->length) { 264 if (sg_offset >= miter.length) {
256 sg_offset = 0; 265 sg_offset = 0;
257 sgel = sg_next(sgel); 266 sg_valid = sg_miter_next(&miter);
258 } 267 }
259 } 268 }
260 } 269 }
270 sg_miter_stop(&miter);
271 local_irq_restore(flags);
261 list_del_init(&dma_buffers->list); 272 list_del_init(&dma_buffers->list);
262 list_splice(&temp_list, &dma_buffers->list); 273 list_splice(&temp_list, &dma_buffers->list);
263 return bytes_copied; 274 return bytes_copied;
@@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
471 cmdiocbq->context1 = dd_data; 482 cmdiocbq->context1 = dd_data;
472 cmdiocbq->context2 = cmp; 483 cmdiocbq->context2 = cmp;
473 cmdiocbq->context3 = bmp; 484 cmdiocbq->context3 = bmp;
485 cmdiocbq->context_un.ndlp = ndlp;
474 dd_data->type = TYPE_IOCB; 486 dd_data->type = TYPE_IOCB;
475 dd_data->set_job = job; 487 dd_data->set_job = job;
476 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 488 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1508 ctiocb->context1 = dd_data; 1520 ctiocb->context1 = dd_data;
1509 ctiocb->context2 = cmp; 1521 ctiocb->context2 = cmp;
1510 ctiocb->context3 = bmp; 1522 ctiocb->context3 = bmp;
1523 ctiocb->context_un.ndlp = ndlp;
1511 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; 1524 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1512 1525
1513 dd_data->type = TYPE_IOCB; 1526 dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2576 evt->wait_time_stamp = jiffies; 2589 evt->wait_time_stamp = jiffies;
2577 time_left = wait_event_interruptible_timeout( 2590 time_left = wait_event_interruptible_timeout(
2578 evt->wq, !list_empty(&evt->events_to_see), 2591 evt->wq, !list_empty(&evt->events_to_see),
2579 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 2592 msecs_to_jiffies(1000 *
2593 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2580 if (list_empty(&evt->events_to_see)) 2594 if (list_empty(&evt->events_to_see))
2581 ret_val = (time_left) ? -EINTR : -ETIMEDOUT; 2595 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2582 else { 2596 else {
@@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
3151 evt->waiting = 1; 3165 evt->waiting = 1;
3152 time_left = wait_event_interruptible_timeout( 3166 time_left = wait_event_interruptible_timeout(
3153 evt->wq, !list_empty(&evt->events_to_see), 3167 evt->wq, !list_empty(&evt->events_to_see),
3154 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 3168 msecs_to_jiffies(1000 *
3169 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3155 evt->waiting = 0; 3170 evt->waiting = 0;
3156 if (list_empty(&evt->events_to_see)) { 3171 if (list_empty(&evt->events_to_see)) {
3157 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3172 rc = (time_left) ? -EINTR : -ETIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7631893ae005..d41456e5f814 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
470void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); 470void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
471uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); 471uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
472int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); 472int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
473void lpfc_sli4_offline_eratt(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7bff3a19af56..ae1a07c57cae 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
1811 if (init_utsname()->nodename[0] != '\0') 1811 if (init_utsname()->nodename[0] != '\0')
1812 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 1812 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
1813 else 1813 else
1814 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); 1814 mod_timer(&vport->fc_fdmitmo, jiffies +
1815 msecs_to_jiffies(1000 * 60));
1815 } 1816 }
1816 return; 1817 return;
1817} 1818}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index bbed8471bf0b..3cae0a92e8bd 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -29,6 +29,7 @@
29#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h> 30#include <scsi/scsi_transport_fc.h>
31 31
32
32#include "lpfc_hw4.h" 33#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 34#include "lpfc_hw.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
@@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
238 239
239 icmd->un.elsreq64.remoteID = did; /* DID */ 240 icmd->un.elsreq64.remoteID = did; /* DID */
240 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 241 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
241 icmd->ulpTimeout = phba->fc_ratov * 2; 242 if (elscmd == ELS_CMD_FLOGI)
243 icmd->ulpTimeout = FF_DEF_RATOV * 2;
244 else
245 icmd->ulpTimeout = phba->fc_ratov * 2;
242 } else { 246 } else {
243 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 247 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
244 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 248 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
308 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 312 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
309 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 313 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
310 "0116 Xmit ELS command x%x to remote " 314 "0116 Xmit ELS command x%x to remote "
311 "NPORT x%x I/O tag: x%x, port state: x%x\n", 315 "NPORT x%x I/O tag: x%x, port state:x%x"
316 " fc_flag:x%x\n",
312 elscmd, did, elsiocb->iotag, 317 elscmd, did, elsiocb->iotag,
313 vport->port_state); 318 vport->port_state,
319 vport->fc_flag);
314 } else { 320 } else {
315 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 321 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
316 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 322 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
317 "0117 Xmit ELS response x%x to remote " 323 "0117 Xmit ELS response x%x to remote "
318 "NPORT x%x I/O tag: x%x, size: x%x\n", 324 "NPORT x%x I/O tag: x%x, size: x%x "
325 "port_state x%x fc_flag x%x\n",
319 elscmd, ndlp->nlp_DID, elsiocb->iotag, 326 elscmd, ndlp->nlp_DID, elsiocb->iotag,
320 cmdSize); 327 cmdSize, vport->port_state,
328 vport->fc_flag);
321 } 329 }
322 return elsiocb; 330 return elsiocb;
323 331
@@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
909 spin_lock_irq(shost->host_lock); 917 spin_lock_irq(shost->host_lock);
910 vport->fc_flag |= FC_PT2PT; 918 vport->fc_flag |= FC_PT2PT;
911 spin_unlock_irq(shost->host_lock); 919 spin_unlock_irq(shost->host_lock);
920 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
921 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
922 lpfc_unregister_fcf_prep(phba);
923
924 /* The FC_VFI_REGISTERED flag will get clear in the cmpl
925 * handler for unreg_vfi, but if we don't force the
926 * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
927 * built with the update bit set instead of just the vp bit to
928 * change the Nport ID. We need to have the vp set and the
929 * Upd cleared on topology changes.
930 */
931 spin_lock_irq(shost->host_lock);
932 vport->fc_flag &= ~FC_VFI_REGISTERED;
933 spin_unlock_irq(shost->host_lock);
934 phba->fc_topology_changed = 0;
935 lpfc_issue_reg_vfi(vport);
936 }
912 937
913 /* Start discovery - this should just do CLEAR_LA */ 938 /* Start discovery - this should just do CLEAR_LA */
914 lpfc_disc_start(vport); 939 lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi:
1030 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1055 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1031 if ((phba->sli_rev == LPFC_SLI_REV4) && 1056 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1032 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1057 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1033 (vport->fc_prevDID != vport->fc_myDID))) { 1058 (vport->fc_prevDID != vport->fc_myDID) ||
1034 if (vport->fc_flag & FC_VFI_REGISTERED) 1059 phba->fc_topology_changed)) {
1035 lpfc_sli4_unreg_all_rpis(vport); 1060 if (vport->fc_flag & FC_VFI_REGISTERED) {
1061 if (phba->fc_topology_changed) {
1062 lpfc_unregister_fcf_prep(phba);
1063 spin_lock_irq(shost->host_lock);
1064 vport->fc_flag &= ~FC_VFI_REGISTERED;
1065 spin_unlock_irq(shost->host_lock);
1066 phba->fc_topology_changed = 0;
1067 } else {
1068 lpfc_sli4_unreg_all_rpis(vport);
1069 }
1070 }
1036 lpfc_issue_reg_vfi(vport); 1071 lpfc_issue_reg_vfi(vport);
1037 lpfc_nlp_put(ndlp); 1072 lpfc_nlp_put(ndlp);
1038 goto out; 1073 goto out;
@@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi:
1054 1089
1055 /* FLOGI completes successfully */ 1090 /* FLOGI completes successfully */
1056 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1091 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1057 "0101 FLOGI completes successfully " 1092 "0101 FLOGI completes successfully, I/O tag:x%x, "
1058 "Data: x%x x%x x%x x%x\n", 1093 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
1059 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 1094 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1060 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 1095 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1096 vport->port_state, vport->fc_flag);
1061 1097
1062 if (vport->port_state == LPFC_FLOGI) { 1098 if (vport->port_state == LPFC_FLOGI) {
1063 /* 1099 /*
@@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5047 struct ls_rjt stat; 5083 struct ls_rjt stat;
5048 uint32_t cmd, did; 5084 uint32_t cmd, did;
5049 int rc; 5085 int rc;
5086 uint32_t fc_flag = 0;
5087 uint32_t port_state = 0;
5050 5088
5051 cmd = *lp++; 5089 cmd = *lp++;
5052 sp = (struct serv_parm *) lp; 5090 sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5113 * will be. 5151 * will be.
5114 */ 5152 */
5115 vport->fc_myDID = PT2PT_LocalID; 5153 vport->fc_myDID = PT2PT_LocalID;
5116 } 5154 } else
5155 vport->fc_myDID = PT2PT_RemoteID;
5117 5156
5118 /* 5157 /*
5119 * The vport state should go to LPFC_FLOGI only 5158 * The vport state should go to LPFC_FLOGI only
5120 * AFTER we issue a FLOGI, not receive one. 5159 * AFTER we issue a FLOGI, not receive one.
5121 */ 5160 */
5122 spin_lock_irq(shost->host_lock); 5161 spin_lock_irq(shost->host_lock);
5162 fc_flag = vport->fc_flag;
5163 port_state = vport->port_state;
5123 vport->fc_flag |= FC_PT2PT; 5164 vport->fc_flag |= FC_PT2PT;
5124 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 5165 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
5166 vport->port_state = LPFC_FLOGI;
5125 spin_unlock_irq(shost->host_lock); 5167 spin_unlock_irq(shost->host_lock);
5168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5169 "3311 Rcv Flogi PS x%x new PS x%x "
5170 "fc_flag x%x new fc_flag x%x\n",
5171 port_state, vport->port_state,
5172 fc_flag, vport->fc_flag);
5126 5173
5127 /* 5174 /*
5128 * We temporarily set fc_myDID to make it look like we are 5175 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6241 } 6288 }
6242 6289
6243 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) 6290 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
6244 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 6291 mod_timer(&vport->els_tmofunc,
6292 jiffies + msecs_to_jiffies(1000 * timeout));
6245} 6293}
6246 6294
6247/** 6295/**
@@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6612 /* ELS command <elsCmd> received from NPORT <did> */ 6660 /* ELS command <elsCmd> received from NPORT <did> */
6613 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6661 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6614 "0112 ELS command x%x received from NPORT x%x " 6662 "0112 ELS command x%x received from NPORT x%x "
6615 "Data: x%x\n", cmd, did, vport->port_state); 6663 "Data: x%x x%x x%x x%x\n",
6664 cmd, did, vport->port_state, vport->fc_flag,
6665 vport->fc_myDID, vport->fc_prevDID);
6616 switch (cmd) { 6666 switch (cmd) {
6617 case ELS_CMD_PLOGI: 6667 case ELS_CMD_PLOGI:
6618 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6668 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6621 6671
6622 phba->fc_stat.elsRcvPLOGI++; 6672 phba->fc_stat.elsRcvPLOGI++;
6623 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 6673 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
6674 if (phba->sli_rev == LPFC_SLI_REV4 &&
6675 (phba->pport->fc_flag & FC_PT2PT)) {
6676 vport->fc_prevDID = vport->fc_myDID;
6677 /* Our DID needs to be updated before registering
6678 * the vfi. This is done in lpfc_rcv_plogi but
6679 * that is called after the reg_vfi.
6680 */
6681 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
6682 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6683 "3312 Remote port assigned DID x%x "
6684 "%x\n", vport->fc_myDID,
6685 vport->fc_prevDID);
6686 }
6624 6687
6625 lpfc_send_els_event(vport, ndlp, payload); 6688 lpfc_send_els_event(vport, ndlp, payload);
6626 6689
@@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6630 rjt_exp = LSEXP_NOTHING_MORE; 6693 rjt_exp = LSEXP_NOTHING_MORE;
6631 break; 6694 break;
6632 } 6695 }
6696 shost = lpfc_shost_from_vport(vport);
6633 if (vport->port_state < LPFC_DISC_AUTH) { 6697 if (vport->port_state < LPFC_DISC_AUTH) {
6634 if (!(phba->pport->fc_flag & FC_PT2PT) || 6698 if (!(phba->pport->fc_flag & FC_PT2PT) ||
6635 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 6699 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6641 * another NPort and the other side has initiated 6705 * another NPort and the other side has initiated
6642 * the PLOGI before responding to our FLOGI. 6706 * the PLOGI before responding to our FLOGI.
6643 */ 6707 */
6708 if (phba->sli_rev == LPFC_SLI_REV4 &&
6709 (phba->fc_topology_changed ||
6710 vport->fc_myDID != vport->fc_prevDID)) {
6711 lpfc_unregister_fcf_prep(phba);
6712 spin_lock_irq(shost->host_lock);
6713 vport->fc_flag &= ~FC_VFI_REGISTERED;
6714 spin_unlock_irq(shost->host_lock);
6715 phba->fc_topology_changed = 0;
6716 lpfc_issue_reg_vfi(vport);
6717 }
6644 } 6718 }
6645 6719
6646 shost = lpfc_shost_from_vport(vport);
6647 spin_lock_irq(shost->host_lock); 6720 spin_lock_irq(shost->host_lock);
6648 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 6721 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
6649 spin_unlock_irq(shost->host_lock); 6722 spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
7002 spin_lock_irq(shost->host_lock); 7075 spin_lock_irq(shost->host_lock);
7003 if (vport->fc_flag & FC_DISC_DELAYED) { 7076 if (vport->fc_flag & FC_DISC_DELAYED) {
7004 spin_unlock_irq(shost->host_lock); 7077 spin_unlock_irq(shost->host_lock);
7078 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
7079 "3334 Delay fc port discovery for %d seconds\n",
7080 phba->fc_ratov);
7005 mod_timer(&vport->delayed_disc_tmo, 7081 mod_timer(&vport->delayed_disc_tmo,
7006 jiffies + HZ * phba->fc_ratov); 7082 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
7007 return; 7083 return;
7008 } 7084 }
7009 spin_unlock_irq(shost->host_lock); 7085 spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
7287 return; 7363 return;
7288 7364
7289 shost = lpfc_shost_from_vport(phba->pport); 7365 shost = lpfc_shost_from_vport(phba->pport);
7290 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 7366 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
7291 spin_lock_irq(shost->host_lock); 7367 spin_lock_irq(shost->host_lock);
7292 ndlp->nlp_flag |= NLP_DELAY_TMO; 7368 ndlp->nlp_flag |= NLP_DELAY_TMO;
7293 spin_unlock_irq(shost->host_lock); 7369 spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
7791 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 7867 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
7792 /* Start a timer to unblock fabric iocbs after 100ms */ 7868 /* Start a timer to unblock fabric iocbs after 100ms */
7793 if (!blocked) 7869 if (!blocked)
7794 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 7870 mod_timer(&phba->fabric_block_timer,
7871 jiffies + msecs_to_jiffies(100));
7795 7872
7796 return; 7873 return;
7797} 7874}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 326e05a65a73..0f6e2548f35d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
160 if (!list_empty(&evtp->evt_listp)) 160 if (!list_empty(&evtp->evt_listp))
161 return; 161 return;
162 162
163 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
164
163 spin_lock_irq(&phba->hbalock); 165 spin_lock_irq(&phba->hbalock);
164 /* We need to hold the node by incrementing the reference 166 /* We need to hold the node by incrementing the reference
165 * count until this queued work is done 167 * count until this queued work is done
166 */ 168 */
167 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
168 if (evtp->evt_arg1) { 169 if (evtp->evt_arg1) {
169 evtp->evt = LPFC_EVT_DEV_LOSS; 170 evtp->evt = LPFC_EVT_DEV_LOSS;
170 list_add_tail(&evtp->evt_listp, &phba->work_list); 171 list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
1008 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 1009 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1009 lpfc_linkup_port(vports[i]); 1010 lpfc_linkup_port(vports[i]);
1010 lpfc_destroy_vport_work_array(phba, vports); 1011 lpfc_destroy_vport_work_array(phba, vports);
1011 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1012 (phba->sli_rev < LPFC_SLI_REV4))
1013 lpfc_issue_clear_la(phba, phba->pport);
1014 1012
1015 return 0; 1013 return 0;
1016} 1014}
@@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1436 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1434 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1437 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1435 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1438 phba->hba_flag &= ~FCF_TS_INPROG; 1436 phba->hba_flag &= ~FCF_TS_INPROG;
1439 if (phba->pport->port_state != LPFC_FLOGI) { 1437 if (phba->pport->port_state != LPFC_FLOGI &&
1438 phba->pport->fc_flag & FC_FABRIC) {
1440 phba->hba_flag |= FCF_RR_INPROG; 1439 phba->hba_flag |= FCF_RR_INPROG;
1441 spin_unlock_irq(&phba->hbalock); 1440 spin_unlock_irq(&phba->hbalock);
1442 lpfc_initial_flogi(phba->pport); 1441 lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2270 spin_unlock_irq(&phba->hbalock); 2269 spin_unlock_irq(&phba->hbalock);
2271 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2270 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2272 "2836 New FCF matches in-use " 2271 "2836 New FCF matches in-use "
2273 "FCF (x%x)\n", 2272 "FCF (x%x), port_state:x%x, "
2274 phba->fcf.current_rec.fcf_indx); 2273 "fc_flag:x%x\n",
2274 phba->fcf.current_rec.fcf_indx,
2275 phba->pport->port_state,
2276 phba->pport->fc_flag);
2275 goto out; 2277 goto out;
2276 } else 2278 } else
2277 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2279 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@ void
2796lpfc_issue_init_vpi(struct lpfc_vport *vport) 2798lpfc_issue_init_vpi(struct lpfc_vport *vport)
2797{ 2799{
2798 LPFC_MBOXQ_t *mboxq; 2800 LPFC_MBOXQ_t *mboxq;
2799 int rc; 2801 int rc, vpi;
2802
2803 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2804 vpi = lpfc_alloc_vpi(vport->phba);
2805 if (!vpi) {
2806 lpfc_printf_vlog(vport, KERN_ERR,
2807 LOG_MBOX,
2808 "3303 Failed to obtain vport vpi\n");
2809 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2810 return;
2811 }
2812 vport->vpi = vpi;
2813 }
2800 2814
2801 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); 2815 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2802 if (!mboxq) { 2816 if (!mboxq) {
@@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2894 goto out_free_mem; 2908 goto out_free_mem;
2895 } 2909 }
2896 2910
2897 /* If the VFI is already registered, there is nothing else to do */ 2911 /* If the VFI is already registered, there is nothing else to do
2912 * Unless this was a VFI update and we are in PT2PT mode, then
2913 * we should drop through to set the port state to ready.
2914 */
2898 if (vport->fc_flag & FC_VFI_REGISTERED) 2915 if (vport->fc_flag & FC_VFI_REGISTERED)
2899 goto out_free_mem; 2916 if (!(phba->sli_rev == LPFC_SLI_REV4 &&
2917 vport->fc_flag & FC_PT2PT))
2918 goto out_free_mem;
2900 2919
2901 /* The VPI is implicitly registered when the VFI is registered */ 2920 /* The VPI is implicitly registered when the VFI is registered */
2902 spin_lock_irq(shost->host_lock); 2921 spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2913 goto out_free_mem; 2932 goto out_free_mem;
2914 } 2933 }
2915 2934
2935 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2936 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
2937 "alpacnt:%d LinkState:%x topology:%x\n",
2938 vport->port_state, vport->fc_flag, vport->fc_myDID,
2939 vport->phba->alpa_map[0],
2940 phba->link_state, phba->fc_topology);
2941
2916 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2942 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2917 /* 2943 /*
2918 * For private loop or for NPort pt2pt, 2944 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2925 /* Use loop map to make discovery list */ 2951 /* Use loop map to make discovery list */
2926 lpfc_disc_list_loopmap(vport); 2952 lpfc_disc_list_loopmap(vport);
2927 /* Start discovery */ 2953 /* Start discovery */
2928 lpfc_disc_start(vport); 2954 if (vport->fc_flag & FC_PT2PT)
2955 vport->port_state = LPFC_VPORT_READY;
2956 else
2957 lpfc_disc_start(vport);
2929 } else { 2958 } else {
2930 lpfc_start_fdiscs(phba); 2959 lpfc_start_fdiscs(phba);
2931 lpfc_do_scr_ns_plogi(phba, vport); 2960 lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3007 break; 3036 break;
3008 } 3037 }
3009 3038
3039 if (phba->fc_topology &&
3040 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3041 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3042 "3314 Toplogy changed was 0x%x is 0x%x\n",
3043 phba->fc_topology,
3044 bf_get(lpfc_mbx_read_top_topology, la));
3045 phba->fc_topology_changed = 1;
3046 }
3047
3010 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); 3048 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3011 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 3049 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3012 3050
@@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
4235 tmo, vport->port_state, vport->fc_flag); 4273 tmo, vport->port_state, vport->fc_flag);
4236 } 4274 }
4237 4275
4238 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); 4276 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4239 spin_lock_irq(shost->host_lock); 4277 spin_lock_irq(shost->host_lock);
4240 vport->fc_flag |= FC_DISC_TMO; 4278 vport->fc_flag |= FC_DISC_TMO;
4241 spin_unlock_irq(shost->host_lock); 4279 spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
4949 uint32_t clear_la_pending; 4987 uint32_t clear_la_pending;
4950 int did_changed; 4988 int did_changed;
4951 4989
4952 if (!lpfc_is_link_up(phba)) 4990 if (!lpfc_is_link_up(phba)) {
4991 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4992 "3315 Link is not up %x\n",
4993 phba->link_state);
4953 return; 4994 return;
4995 }
4954 4996
4955 if (phba->link_state == LPFC_CLEAR_LA) 4997 if (phba->link_state == LPFC_CLEAR_LA)
4956 clear_la_pending = 1; 4998 clear_la_pending = 1;
@@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
4983 if (num_sent) 5025 if (num_sent)
4984 return; 5026 return;
4985 5027
4986 /* Register the VPI for SLI3, NON-NPIV only. */ 5028 /* Register the VPI for SLI3, NPIV only. */
4987 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 5029 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4988 !(vport->fc_flag & FC_PT2PT) && 5030 !(vport->fc_flag & FC_PT2PT) &&
4989 !(vport->fc_flag & FC_RSCN_MODE) && 5031 !(vport->fc_flag & FC_RSCN_MODE) &&
4990 (phba->sli_rev < LPFC_SLI_REV4)) { 5032 (phba->sli_rev < LPFC_SLI_REV4)) {
5033 if (vport->port_type == LPFC_PHYSICAL_PORT)
5034 lpfc_issue_clear_la(phba, vport);
4991 lpfc_issue_reg_vpi(phba, vport); 5035 lpfc_issue_reg_vpi(phba, vport);
4992 return; 5036 return;
4993 } 5037 }
@@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5410 if (vport->cfg_fdmi_on == 1) 5454 if (vport->cfg_fdmi_on == 1)
5411 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 5455 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
5412 else 5456 else
5413 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); 5457 mod_timer(&vport->fc_fdmitmo,
5458 jiffies + msecs_to_jiffies(1000 * 60));
5414 5459
5415 /* decrement the node reference count held for this callback 5460 /* decrement the node reference count held for this callback
5416 * function. 5461 * function.
@@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
5855 struct lpfc_vport **vports; 5900 struct lpfc_vport **vports;
5856 struct lpfc_nodelist *ndlp; 5901 struct lpfc_nodelist *ndlp;
5857 struct Scsi_Host *shost; 5902 struct Scsi_Host *shost;
5858 int i, rc; 5903 int i = 0, rc;
5859 5904
5860 /* Unregister RPIs */ 5905 /* Unregister RPIs */
5861 if (lpfc_fcf_inuse(phba)) 5906 if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
5883 spin_unlock_irq(shost->host_lock); 5928 spin_unlock_irq(shost->host_lock);
5884 } 5929 }
5885 lpfc_destroy_vport_work_array(phba, vports); 5930 lpfc_destroy_vport_work_array(phba, vports);
5931 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
5932 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
5933 if (ndlp)
5934 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
5935 lpfc_cleanup_pending_mbox(phba->pport);
5936 if (phba->sli_rev == LPFC_SLI_REV4)
5937 lpfc_sli4_unreg_all_rpis(phba->pport);
5938 lpfc_mbx_unreg_vpi(phba->pport);
5939 shost = lpfc_shost_from_vport(phba->pport);
5940 spin_lock_irq(shost->host_lock);
5941 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
5942 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
5943 spin_unlock_irq(shost->host_lock);
5944 }
5886 5945
5887 /* Cleanup any outstanding ELS commands */ 5946 /* Cleanup any outstanding ELS commands */
5888 lpfc_els_flush_all_cmd(phba); 5947 lpfc_els_flush_all_cmd(phba);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index e8c476031703..83700c18f468 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
1667#define BG_OP_IN_CSUM_OUT_CSUM 0x5 1667#define BG_OP_IN_CSUM_OUT_CSUM 0x5
1668#define BG_OP_IN_CRC_OUT_CSUM 0x6 1668#define BG_OP_IN_CRC_OUT_CSUM 0x6
1669#define BG_OP_IN_CSUM_OUT_CRC 0x7 1669#define BG_OP_IN_CSUM_OUT_CRC 0x7
1670#define BG_OP_RAW_MODE 0x8
1670 1671
1671struct lpfc_pde5 { 1672struct lpfc_pde5 {
1672 uint32_t word0; 1673 uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1dd2f6f0a127..713a4613ec3a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -200,6 +200,11 @@ struct lpfc_sli_intf {
200#define LPFC_MAX_IMAX 5000000 200#define LPFC_MAX_IMAX 5000000
201#define LPFC_DEF_IMAX 50000 201#define LPFC_DEF_IMAX 50000
202 202
203#define LPFC_MIN_CPU_MAP 0
204#define LPFC_MAX_CPU_MAP 2
205#define LPFC_HBA_CPU_MAP 1
206#define LPFC_DRIVER_CPU_MAP 2 /* Default */
207
203/* PORT_CAPABILITIES constants. */ 208/* PORT_CAPABILITIES constants. */
204#define LPFC_MAX_SUPPORTED_PAGES 8 209#define LPFC_MAX_SUPPORTED_PAGES 8
205 210
@@ -621,7 +626,7 @@ struct lpfc_register {
621#define lpfc_sliport_status_rdy_SHIFT 23 626#define lpfc_sliport_status_rdy_SHIFT 23
622#define lpfc_sliport_status_rdy_MASK 0x1 627#define lpfc_sliport_status_rdy_MASK 0x1
623#define lpfc_sliport_status_rdy_WORD word0 628#define lpfc_sliport_status_rdy_WORD word0
624#define MAX_IF_TYPE_2_RESETS 1000 629#define MAX_IF_TYPE_2_RESETS 6
625 630
626#define LPFC_CTL_PORT_CTL_OFFSET 0x408 631#define LPFC_CTL_PORT_CTL_OFFSET 0x408
627#define lpfc_sliport_ctrl_end_SHIFT 30 632#define lpfc_sliport_ctrl_end_SHIFT 30
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90b8b0515e23..cb465b253910 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -33,6 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/miscdevice.h> 35#include <linux/miscdevice.h>
36#include <linux/percpu.h>
36 37
37#include <scsi/scsi.h> 38#include <scsi/scsi.h>
38#include <scsi/scsi_device.h> 39#include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@ char *_dump_buf_dif;
58unsigned long _dump_buf_dif_order; 59unsigned long _dump_buf_dif_order;
59spinlock_t _dump_buf_lock; 60spinlock_t _dump_buf_lock;
60 61
62/* Used when mapping IRQ vectors in a driver centric manner */
63uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
64
61static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 65static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
62static int lpfc_post_rcv_buf(struct lpfc_hba *); 66static int lpfc_post_rcv_buf(struct lpfc_hba *);
63static int lpfc_sli4_queue_verify(struct lpfc_hba *); 67static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
541 545
542 /* Set up ring-0 (ELS) timer */ 546 /* Set up ring-0 (ELS) timer */
543 timeout = phba->fc_ratov * 2; 547 timeout = phba->fc_ratov * 2;
544 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 548 mod_timer(&vport->els_tmofunc,
549 jiffies + msecs_to_jiffies(1000 * timeout));
545 /* Set up heart beat (HB) timer */ 550 /* Set up heart beat (HB) timer */
546 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 551 mod_timer(&phba->hb_tmofunc,
552 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
547 phba->hb_outstanding = 0; 553 phba->hb_outstanding = 0;
548 phba->last_completion_time = jiffies; 554 phba->last_completion_time = jiffies;
549 /* Set up error attention (ERATT) polling timer */ 555 /* Set up error attention (ERATT) polling timer */
550 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 556 mod_timer(&phba->eratt_poll,
557 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
551 558
552 if (phba->hba_flag & LINK_DISABLED) { 559 if (phba->hba_flag & LINK_DISABLED) {
553 lpfc_printf_log(phba, 560 lpfc_printf_log(phba,
@@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
908 psb->pCmd = NULL; 915 psb->pCmd = NULL;
909 psb->status = IOSTAT_SUCCESS; 916 psb->status = IOSTAT_SUCCESS;
910 } 917 }
911 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 918 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
912 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 919 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
913 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 920 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
914 return 0; 921 return 0;
915} 922}
916 923
@@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1021 !(phba->link_state == LPFC_HBA_ERROR) && 1028 !(phba->link_state == LPFC_HBA_ERROR) &&
1022 !(phba->pport->load_flag & FC_UNLOADING)) 1029 !(phba->pport->load_flag & FC_UNLOADING))
1023 mod_timer(&phba->hb_tmofunc, 1030 mod_timer(&phba->hb_tmofunc,
1024 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1031 jiffies +
1032 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1025 return; 1033 return;
1026} 1034}
1027 1035
@@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1064 1072
1065 spin_lock_irq(&phba->pport->work_port_lock); 1073 spin_lock_irq(&phba->pport->work_port_lock);
1066 1074
1067 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 1075 if (time_after(phba->last_completion_time +
1068 jiffies)) { 1076 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1077 jiffies)) {
1069 spin_unlock_irq(&phba->pport->work_port_lock); 1078 spin_unlock_irq(&phba->pport->work_port_lock);
1070 if (!phba->hb_outstanding) 1079 if (!phba->hb_outstanding)
1071 mod_timer(&phba->hb_tmofunc, 1080 mod_timer(&phba->hb_tmofunc,
1072 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1081 jiffies +
1082 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1073 else 1083 else
1074 mod_timer(&phba->hb_tmofunc, 1084 mod_timer(&phba->hb_tmofunc,
1075 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1085 jiffies +
1086 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1076 return; 1087 return;
1077 } 1088 }
1078 spin_unlock_irq(&phba->pport->work_port_lock); 1089 spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1104 if (!pmboxq) { 1115 if (!pmboxq) {
1105 mod_timer(&phba->hb_tmofunc, 1116 mod_timer(&phba->hb_tmofunc,
1106 jiffies + 1117 jiffies +
1107 HZ * LPFC_HB_MBOX_INTERVAL); 1118 msecs_to_jiffies(1000 *
1119 LPFC_HB_MBOX_INTERVAL));
1108 return; 1120 return;
1109 } 1121 }
1110 1122
@@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1120 phba->mbox_mem_pool); 1132 phba->mbox_mem_pool);
1121 mod_timer(&phba->hb_tmofunc, 1133 mod_timer(&phba->hb_tmofunc,
1122 jiffies + 1134 jiffies +
1123 HZ * LPFC_HB_MBOX_INTERVAL); 1135 msecs_to_jiffies(1000 *
1136 LPFC_HB_MBOX_INTERVAL));
1124 return; 1137 return;
1125 } 1138 }
1126 phba->skipped_hb = 0; 1139 phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1136 phba->skipped_hb = jiffies; 1149 phba->skipped_hb = jiffies;
1137 1150
1138 mod_timer(&phba->hb_tmofunc, 1151 mod_timer(&phba->hb_tmofunc,
1139 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1152 jiffies +
1153 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1140 return; 1154 return;
1141 } else { 1155 } else {
1142 /* 1156 /*
@@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1150 jiffies_to_msecs(jiffies 1164 jiffies_to_msecs(jiffies
1151 - phba->last_completion_time)); 1165 - phba->last_completion_time));
1152 mod_timer(&phba->hb_tmofunc, 1166 mod_timer(&phba->hb_tmofunc,
1153 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1167 jiffies +
1168 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1154 } 1169 }
1155 } 1170 }
1156} 1171}
@@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
1191 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1206 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1192 * other than Port Error 6 has been detected. 1207 * other than Port Error 6 has been detected.
1193 **/ 1208 **/
1194static void 1209void
1195lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1210lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1196{ 1211{
1197 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1212 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
2633 struct lpfc_vport *vport; 2648 struct lpfc_vport *vport;
2634 struct lpfc_vport **vports; 2649 struct lpfc_vport **vports;
2635 int i; 2650 int i;
2651 bool vpis_cleared = false;
2636 2652
2637 if (!phba) 2653 if (!phba)
2638 return 0; 2654 return 0;
@@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
2656 lpfc_unblock_mgmt_io(phba); 2672 lpfc_unblock_mgmt_io(phba);
2657 return 1; 2673 return 1;
2658 } 2674 }
2675 spin_lock_irq(&phba->hbalock);
2676 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2677 vpis_cleared = true;
2678 spin_unlock_irq(&phba->hbalock);
2659 } else { 2679 } else {
2660 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2680 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2661 lpfc_unblock_mgmt_io(phba); 2681 lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
2672 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2692 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2673 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2693 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2674 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2694 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2675 if (phba->sli_rev == LPFC_SLI_REV4) 2695 if (phba->sli_rev == LPFC_SLI_REV4) {
2676 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2696 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2697 if ((vpis_cleared) &&
2698 (vports[i]->port_type !=
2699 LPFC_PHYSICAL_PORT))
2700 vports[i]->vpi = 0;
2701 }
2677 spin_unlock_irq(shost->host_lock); 2702 spin_unlock_irq(shost->host_lock);
2678 } 2703 }
2679 lpfc_destroy_vport_work_array(phba, vports); 2704 lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2833 struct lpfc_iocbq *io, *io_next; 2858 struct lpfc_iocbq *io, *io_next;
2834 2859
2835 spin_lock_irq(&phba->hbalock); 2860 spin_lock_irq(&phba->hbalock);
2861
2836 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2862 /* Release all the lpfc_scsi_bufs maintained by this host. */
2837 spin_lock(&phba->scsi_buf_list_lock); 2863
2838 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2864 spin_lock(&phba->scsi_buf_list_put_lock);
2865 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
2866 list) {
2839 list_del(&sb->list); 2867 list_del(&sb->list);
2840 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2868 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2841 sb->dma_handle); 2869 sb->dma_handle);
2842 kfree(sb); 2870 kfree(sb);
2843 phba->total_scsi_bufs--; 2871 phba->total_scsi_bufs--;
2844 } 2872 }
2845 spin_unlock(&phba->scsi_buf_list_lock); 2873 spin_unlock(&phba->scsi_buf_list_put_lock);
2874
2875 spin_lock(&phba->scsi_buf_list_get_lock);
2876 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
2877 list) {
2878 list_del(&sb->list);
2879 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2880 sb->dma_handle);
2881 kfree(sb);
2882 phba->total_scsi_bufs--;
2883 }
2884 spin_unlock(&phba->scsi_buf_list_get_lock);
2846 2885
2847 /* Release all the lpfc_iocbq entries maintained by this host. */ 2886 /* Release all the lpfc_iocbq entries maintained by this host. */
2848 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2887 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2978 phba->sli4_hba.scsi_xri_cnt, 3017 phba->sli4_hba.scsi_xri_cnt,
2979 phba->sli4_hba.scsi_xri_max); 3018 phba->sli4_hba.scsi_xri_max);
2980 3019
2981 spin_lock_irq(&phba->scsi_buf_list_lock); 3020 spin_lock_irq(&phba->scsi_buf_list_get_lock);
2982 list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); 3021 spin_lock_irq(&phba->scsi_buf_list_put_lock);
2983 spin_unlock_irq(&phba->scsi_buf_list_lock); 3022 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3023 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3024 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
3025 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
2984 3026
2985 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3027 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
2986 /* max scsi xri shrinked below the allocated scsi buffers */ 3028 /* max scsi xri shrinked below the allocated scsi buffers */
@@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2994 psb->dma_handle); 3036 psb->dma_handle);
2995 kfree(psb); 3037 kfree(psb);
2996 } 3038 }
2997 spin_lock_irq(&phba->scsi_buf_list_lock); 3039 spin_lock_irq(&phba->scsi_buf_list_get_lock);
2998 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3040 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
2999 spin_unlock_irq(&phba->scsi_buf_list_lock); 3041 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3000 } 3042 }
3001 3043
3002 /* update xris associated to remaining allocated scsi buffers */ 3044 /* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3014 psb->cur_iocbq.sli4_lxritag = lxri; 3056 psb->cur_iocbq.sli4_lxritag = lxri;
3015 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3057 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3016 } 3058 }
3017 spin_lock_irq(&phba->scsi_buf_list_lock); 3059 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3018 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); 3060 spin_lock_irq(&phba->scsi_buf_list_put_lock);
3019 spin_unlock_irq(&phba->scsi_buf_list_lock); 3061 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3062 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3063 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
3064 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3020 3065
3021 return 0; 3066 return 0;
3022 3067
@@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3197 stat = 1; 3242 stat = 1;
3198 goto finished; 3243 goto finished;
3199 } 3244 }
3200 if (time >= 30 * HZ) { 3245 if (time >= msecs_to_jiffies(30 * 1000)) {
3201 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3246 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3202 "0461 Scanning longer than 30 " 3247 "0461 Scanning longer than 30 "
3203 "seconds. Continuing initialization\n"); 3248 "seconds. Continuing initialization\n");
3204 stat = 1; 3249 stat = 1;
3205 goto finished; 3250 goto finished;
3206 } 3251 }
3207 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 3252 if (time >= msecs_to_jiffies(15 * 1000) &&
3253 phba->link_state <= LPFC_LINK_DOWN) {
3208 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3254 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3209 "0465 Link down longer than 15 " 3255 "0465 Link down longer than 15 "
3210 "seconds. Continuing initialization\n"); 3256 "seconds. Continuing initialization\n");
@@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3216 goto finished; 3262 goto finished;
3217 if (vport->num_disc_nodes || vport->fc_prli_sent) 3263 if (vport->num_disc_nodes || vport->fc_prli_sent)
3218 goto finished; 3264 goto finished;
3219 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 3265 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3220 goto finished; 3266 goto finished;
3221 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 3267 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3222 goto finished; 3268 goto finished;
@@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4215 * If there are other active VLinks present, 4261 * If there are other active VLinks present,
4216 * re-instantiate the Vlink using FDISC. 4262 * re-instantiate the Vlink using FDISC.
4217 */ 4263 */
4218 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 4264 mod_timer(&ndlp->nlp_delayfunc,
4265 jiffies + msecs_to_jiffies(1000));
4219 shost = lpfc_shost_from_vport(vport); 4266 shost = lpfc_shost_from_vport(vport);
4220 spin_lock_irq(shost->host_lock); 4267 spin_lock_irq(shost->host_lock);
4221 ndlp->nlp_flag |= NLP_DELAY_TMO; 4268 ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4707 return -ENOMEM; 4754 return -ENOMEM;
4708 4755
4709 /* 4756 /*
4710 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4757 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
4711 * used to create the sg_dma_buf_pool must be dynamically calculated. 4758 * used to create the sg_dma_buf_pool must be dynamically calculated.
4712 * 2 segments are added since the IOCB needs a command and response bde.
4713 */ 4759 */
4714 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4715 sizeof(struct fcp_rsp) +
4716 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4717 4760
4761 /* Initialize the host templates the configured values. */
4762 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4763 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4764
4765 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
4718 if (phba->cfg_enable_bg) { 4766 if (phba->cfg_enable_bg) {
4719 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 4767 /*
4720 phba->cfg_sg_dma_buf_size += 4768 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4721 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 4769 * the FCP rsp, and a BDE for each. Sice we have no control
4770 * over how many protection data segments the SCSI Layer
4771 * will hand us (ie: there could be one for every block
4772 * in the IO), we just allocate enough BDEs to accomidate
4773 * our max amount and we need to limit lpfc_sg_seg_cnt to
4774 * minimize the risk of running out.
4775 */
4776 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4777 sizeof(struct fcp_rsp) +
4778 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
4779
4780 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
4781 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
4782
4783 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
4784 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4785 } else {
4786 /*
4787 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4788 * the FCP rsp, a BDE for each, and a BDE for up to
4789 * cfg_sg_seg_cnt data segments.
4790 */
4791 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4792 sizeof(struct fcp_rsp) +
4793 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4794
4795 /* Total BDEs in BPL for scsi_sg_list */
4796 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4722 } 4797 }
4723 4798
4724 /* Also reinitialize the host templates with new values. */ 4799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4725 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4800 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
4726 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 4801 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4802 phba->cfg_total_seg_cnt);
4727 4803
4728 phba->max_vpi = LPFC_MAX_VPI; 4804 phba->max_vpi = LPFC_MAX_VPI;
4729 /* This will be set to correct value after config_port mbox */ 4805 /* This will be set to correct value after config_port mbox */
@@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4789static int 4865static int
4790lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4866lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4791{ 4867{
4868 struct lpfc_vector_map_info *cpup;
4792 struct lpfc_sli *psli; 4869 struct lpfc_sli *psli;
4793 LPFC_MBOXQ_t *mboxq; 4870 LPFC_MBOXQ_t *mboxq;
4794 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4871 int rc, i, hbq_count, max_buf_size;
4795 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4872 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4796 struct lpfc_mqe *mqe; 4873 struct lpfc_mqe *mqe;
4797 int longs, sli_family; 4874 int longs;
4798 int sges_per_segment;
4799 4875
4800 /* Before proceed, wait for POST done and device ready */ 4876 /* Before proceed, wait for POST done and device ready */
4801 rc = lpfc_sli4_post_status_check(phba); 4877 rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4863 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4939 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4864 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4940 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4865 4941
4866 /* With BlockGuard we can have multiple SGEs per Data Segemnt */
4867 sges_per_segment = 1;
4868 if (phba->cfg_enable_bg)
4869 sges_per_segment = 2;
4870
4871 /* 4942 /*
4872 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 4943 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4873 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 4944 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4878 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 4949 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4879 if (!phba->sli.ring) 4950 if (!phba->sli.ring)
4880 return -ENOMEM; 4951 return -ENOMEM;
4952
4881 /* 4953 /*
4882 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4954 * It doesn't matter what family our adapter is in, we are
4955 * limited to 2 Pages, 512 SGEs, for our SGL.
4956 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
4957 */
4958 max_buf_size = (2 * SLI4_PAGE_SIZE);
4959 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
4960 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
4961
4962 /*
4963 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
4883 * used to create the sg_dma_buf_pool must be dynamically calculated. 4964 * used to create the sg_dma_buf_pool must be dynamically calculated.
4884 * 2 segments are added since the IOCB needs a command and response bde.
4885 * To insure that the scsi sgl does not cross a 4k page boundary only
4886 * sgl sizes of must be a power of 2.
4887 */ 4965 */
4888 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4966
4889 (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * 4967 if (phba->cfg_enable_bg) {
4890 sizeof(struct sli4_sge))); 4968 /*
4891 4969 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
4892 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 4970 * the FCP rsp, and a SGE for each. Sice we have no control
4893 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4971 * over how many protection data segments the SCSI Layer
4894 switch (sli_family) { 4972 * will hand us (ie: there could be one for every block
4895 case LPFC_SLI_INTF_FAMILY_BE2: 4973 * in the IO), we just allocate enough SGEs to accomidate
4896 case LPFC_SLI_INTF_FAMILY_BE3: 4974 * our max amount and we need to limit lpfc_sg_seg_cnt to
4897 /* There is a single hint for BE - 2 pages per BPL. */ 4975 * minimize the risk of running out.
4898 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == 4976 */
4899 LPFC_SLI_INTF_SLI_HINT1_1) 4977 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4900 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4978 sizeof(struct fcp_rsp) + max_buf_size;
4901 break; 4979
4902 case LPFC_SLI_INTF_FAMILY_LNCR_A0: 4980 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
4903 case LPFC_SLI_INTF_FAMILY_LNCR_B0: 4981 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
4904 default: 4982
4905 break; 4983 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
4984 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
4985 } else {
4986 /*
4987 * The scsi_buf for a regular I/O will hold the FCP cmnd,
4988 * the FCP rsp, a SGE for each, and a SGE for up to
4989 * cfg_sg_seg_cnt data segments.
4990 */
4991 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4992 sizeof(struct fcp_rsp) +
4993 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
4994
4995 /* Total SGEs for scsi_sg_list */
4996 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4997 /*
4998 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
4999 * to post 1 page for the SGL.
5000 */
4906 } 5001 }
4907 5002
4908 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 5003 /* Initialize the host templates with the updated values. */
4909 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 5004 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4910 dma_buf_size = dma_buf_size << 1) 5005 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4911 ; 5006
4912 if (dma_buf_size == max_buf_size) 5007 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
4913 phba->cfg_sg_seg_cnt = (dma_buf_size - 5008 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
4914 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 5009 else
4915 (2 * sizeof(struct sli4_sge))) / 5010 phba->cfg_sg_dma_buf_size =
4916 sizeof(struct sli4_sge); 5011 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
4917 phba->cfg_sg_dma_buf_size = dma_buf_size; 5012
5013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5014 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5015 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5016 phba->cfg_total_seg_cnt);
4918 5017
4919 /* Initialize buffer queue management fields */ 5018 /* Initialize buffer queue management fields */
4920 hbq_count = lpfc_sli_hbq_count(); 5019 hbq_count = lpfc_sli_hbq_count();
@@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5104 goto out_free_fcp_eq_hdl; 5203 goto out_free_fcp_eq_hdl;
5105 } 5204 }
5106 5205
5206 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5207 phba->sli4_hba.num_present_cpu),
5208 GFP_KERNEL);
5209 if (!phba->sli4_hba.cpu_map) {
5210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5211 "3327 Failed allocate memory for msi-x "
5212 "interrupt vector mapping\n");
5213 rc = -ENOMEM;
5214 goto out_free_msix;
5215 }
5216 /* Initialize io channels for round robin */
5217 cpup = phba->sli4_hba.cpu_map;
5218 rc = 0;
5219 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5220 cpup->channel_id = rc;
5221 rc++;
5222 if (rc >= phba->cfg_fcp_io_channel)
5223 rc = 0;
5224 }
5225
5107 /* 5226 /*
5108 * Enable sr-iov virtual functions if supported and configured 5227 * Enable sr-iov virtual functions if supported and configured
5109 * through the module parameter. 5228 * through the module parameter.
@@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5123 5242
5124 return 0; 5243 return 0;
5125 5244
5245out_free_msix:
5246 kfree(phba->sli4_hba.msix_entries);
5126out_free_fcp_eq_hdl: 5247out_free_fcp_eq_hdl:
5127 kfree(phba->sli4_hba.fcp_eq_hdl); 5248 kfree(phba->sli4_hba.fcp_eq_hdl);
5128out_free_fcf_rr_bmask: 5249out_free_fcf_rr_bmask:
@@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5152{ 5273{
5153 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 5274 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5154 5275
5276 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5277 kfree(phba->sli4_hba.cpu_map);
5278 phba->sli4_hba.num_present_cpu = 0;
5279 phba->sli4_hba.num_online_cpu = 0;
5280
5155 /* Free memory allocated for msi-x interrupt vector entries */ 5281 /* Free memory allocated for msi-x interrupt vector entries */
5156 kfree(phba->sli4_hba.msix_entries); 5282 kfree(phba->sli4_hba.msix_entries);
5157 5283
@@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5260 init_waitqueue_head(&phba->work_waitq); 5386 init_waitqueue_head(&phba->work_waitq);
5261 5387
5262 /* Initialize the scsi buffer list used by driver for scsi IO */ 5388 /* Initialize the scsi buffer list used by driver for scsi IO */
5263 spin_lock_init(&phba->scsi_buf_list_lock); 5389 spin_lock_init(&phba->scsi_buf_list_get_lock);
5264 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 5390 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5391 spin_lock_init(&phba->scsi_buf_list_put_lock);
5392 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5265 5393
5266 /* Initialize the fabric iocb list */ 5394 /* Initialize the fabric iocb list */
5267 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5395 INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6696 int cfg_fcp_io_channel; 6824 int cfg_fcp_io_channel;
6697 uint32_t cpu; 6825 uint32_t cpu;
6698 uint32_t i = 0; 6826 uint32_t i = 0;
6827 uint32_t j = 0;
6699 6828
6700 6829
6701 /* 6830 /*
@@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6706 /* Sanity check on HBA EQ parameters */ 6835 /* Sanity check on HBA EQ parameters */
6707 cfg_fcp_io_channel = phba->cfg_fcp_io_channel; 6836 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6708 6837
6709 /* It doesn't make sense to have more io channels then CPUs */ 6838 /* It doesn't make sense to have more io channels then online CPUs */
6710 for_each_online_cpu(cpu) { 6839 for_each_present_cpu(cpu) {
6711 i++; 6840 if (cpu_online(cpu))
6841 i++;
6842 j++;
6712 } 6843 }
6844 phba->sli4_hba.num_online_cpu = i;
6845 phba->sli4_hba.num_present_cpu = j;
6846
6713 if (i < cfg_fcp_io_channel) { 6847 if (i < cfg_fcp_io_channel) {
6714 lpfc_printf_log(phba, 6848 lpfc_printf_log(phba,
6715 KERN_ERR, LOG_INIT, 6849 KERN_ERR, LOG_INIT,
6716 "3188 Reducing IO channels to match number of " 6850 "3188 Reducing IO channels to match number of "
6717 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); 6851 "online CPUs: from %d to %d\n",
6852 cfg_fcp_io_channel, i);
6718 cfg_fcp_io_channel = i; 6853 cfg_fcp_io_channel = i;
6719 } 6854 }
6720 6855
@@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7743 7878
7744out: 7879out:
7745 /* Catch the not-ready port failure after a port reset. */ 7880 /* Catch the not-ready port failure after a port reset. */
7746 if (num_resets >= MAX_IF_TYPE_2_RESETS) 7881 if (num_resets >= MAX_IF_TYPE_2_RESETS) {
7882 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7883 "3317 HBA not functional: IP Reset Failed "
7884 "after (%d) retries, try: "
7885 "echo fw_reset > board_mode\n", num_resets);
7747 rc = -ENODEV; 7886 rc = -ENODEV;
7887 }
7748 7888
7749 return rc; 7889 return rc;
7750} 7890}
@@ -8209,6 +8349,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
8209} 8349}
8210 8350
8211/** 8351/**
8352 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
8353 * @phba: pointer to lpfc hba data structure.
8354 *
8355 * Find next available CPU to use for IRQ to CPU affinity.
8356 */
8357static int
8358lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8359{
8360 struct lpfc_vector_map_info *cpup;
8361 int cpu;
8362
8363 cpup = phba->sli4_hba.cpu_map;
8364 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8365 /* CPU must be online */
8366 if (cpu_online(cpu)) {
8367 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8368 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8369 (cpup->phys_id == phys_id)) {
8370 return cpu;
8371 }
8372 }
8373 cpup++;
8374 }
8375
8376 /*
8377 * If we get here, we have used ALL CPUs for the specific
8378 * phys_id. Now we need to clear out lpfc_used_cpu and start
8379 * reusing CPUs.
8380 */
8381
8382 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8383 if (lpfc_used_cpu[cpu] == phys_id)
8384 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8385 }
8386
8387 cpup = phba->sli4_hba.cpu_map;
8388 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8389 /* CPU must be online */
8390 if (cpu_online(cpu)) {
8391 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8392 (cpup->phys_id == phys_id)) {
8393 return cpu;
8394 }
8395 }
8396 cpup++;
8397 }
8398 return LPFC_VECTOR_MAP_EMPTY;
8399}
8400
8401/**
8402 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
8403 * @phba: pointer to lpfc hba data structure.
8404 * @vectors: number of HBA vectors
8405 *
8406 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
8407 * affinization across multple physical CPUs (numa nodes).
8408 * In addition, this routine will assign an IO channel for each CPU
8409 * to use when issuing I/Os.
8410 */
8411static int
8412lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8413{
8414 int i, idx, saved_chann, used_chann, cpu, phys_id;
8415 int max_phys_id, num_io_channel, first_cpu;
8416 struct lpfc_vector_map_info *cpup;
8417#ifdef CONFIG_X86
8418 struct cpuinfo_x86 *cpuinfo;
8419#endif
8420 struct cpumask *mask;
8421 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8422
8423 /* If there is no mapping, just return */
8424 if (!phba->cfg_fcp_cpu_map)
8425 return 1;
8426
8427 /* Init cpu_map array */
8428 memset(phba->sli4_hba.cpu_map, 0xff,
8429 (sizeof(struct lpfc_vector_map_info) *
8430 phba->sli4_hba.num_present_cpu));
8431
8432 max_phys_id = 0;
8433 phys_id = 0;
8434 num_io_channel = 0;
8435 first_cpu = LPFC_VECTOR_MAP_EMPTY;
8436
8437 /* Update CPU map with physical id and core id of each CPU */
8438 cpup = phba->sli4_hba.cpu_map;
8439 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8440#ifdef CONFIG_X86
8441 cpuinfo = &cpu_data(cpu);
8442 cpup->phys_id = cpuinfo->phys_proc_id;
8443 cpup->core_id = cpuinfo->cpu_core_id;
8444#else
8445 /* No distinction between CPUs for other platforms */
8446 cpup->phys_id = 0;
8447 cpup->core_id = 0;
8448#endif
8449
8450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8451 "3328 CPU physid %d coreid %d\n",
8452 cpup->phys_id, cpup->core_id);
8453
8454 if (cpup->phys_id > max_phys_id)
8455 max_phys_id = cpup->phys_id;
8456 cpup++;
8457 }
8458
8459 /* Now associate the HBA vectors with specific CPUs */
8460 for (idx = 0; idx < vectors; idx++) {
8461 cpup = phba->sli4_hba.cpu_map;
8462 cpu = lpfc_find_next_cpu(phba, phys_id);
8463 if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8464
8465 /* Try for all phys_id's */
8466 for (i = 1; i < max_phys_id; i++) {
8467 phys_id++;
8468 if (phys_id > max_phys_id)
8469 phys_id = 0;
8470 cpu = lpfc_find_next_cpu(phba, phys_id);
8471 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8472 continue;
8473 goto found;
8474 }
8475
8476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8477 "3329 Cannot set affinity:"
8478 "Error mapping vector %d (%d)\n",
8479 idx, vectors);
8480 return 0;
8481 }
8482found:
8483 cpup += cpu;
8484 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8485 lpfc_used_cpu[cpu] = phys_id;
8486
8487 /* Associate vector with selected CPU */
8488 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8489
8490 /* Associate IO channel with selected CPU */
8491 cpup->channel_id = idx;
8492 num_io_channel++;
8493
8494 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8495 first_cpu = cpu;
8496
8497 /* Now affinitize to the selected CPU */
8498 mask = &cpup->maskbits;
8499 cpumask_clear(mask);
8500 cpumask_set_cpu(cpu, mask);
8501 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8502 vector, mask);
8503
8504 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8505 "3330 Set Affinity: CPU %d channel %d "
8506 "irq %d (%x)\n",
8507 cpu, cpup->channel_id,
8508 phba->sli4_hba.msix_entries[idx].vector, i);
8509
8510 /* Spread vector mapping across multple physical CPU nodes */
8511 phys_id++;
8512 if (phys_id > max_phys_id)
8513 phys_id = 0;
8514 }
8515
8516 /*
8517 * Finally fill in the IO channel for any remaining CPUs.
8518 * At this point, all IO channels have been assigned to a specific
8519 * MSIx vector, mapped to a specific CPU.
8520 * Base the remaining IO channel assigned, to IO channels already
8521 * assigned to other CPUs on the same phys_id.
8522 */
8523 for (i = 0; i <= max_phys_id; i++) {
8524 /*
8525 * If there are no io channels already mapped to
8526 * this phys_id, just round robin thru the io_channels.
8527 * Setup chann[] for round robin.
8528 */
8529 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8530 chann[idx] = idx;
8531
8532 saved_chann = 0;
8533 used_chann = 0;
8534
8535 /*
8536 * First build a list of IO channels already assigned
8537 * to this phys_id before reassigning the same IO
8538 * channels to the remaining CPUs.
8539 */
8540 cpup = phba->sli4_hba.cpu_map;
8541 cpu = first_cpu;
8542 cpup += cpu;
8543 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8544 idx++) {
8545 if (cpup->phys_id == i) {
8546 /*
8547 * Save any IO channels that are
8548 * already mapped to this phys_id.
8549 */
8550 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8551 chann[saved_chann] =
8552 cpup->channel_id;
8553 saved_chann++;
8554 goto out;
8555 }
8556
8557 /* See if we are using round-robin */
8558 if (saved_chann == 0)
8559 saved_chann =
8560 phba->cfg_fcp_io_channel;
8561
8562 /* Associate next IO channel with CPU */
8563 cpup->channel_id = chann[used_chann];
8564 num_io_channel++;
8565 used_chann++;
8566 if (used_chann == saved_chann)
8567 used_chann = 0;
8568
8569 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8570 "3331 Set IO_CHANN "
8571 "CPU %d channel %d\n",
8572 idx, cpup->channel_id);
8573 }
8574out:
8575 cpu++;
8576 if (cpu >= phba->sli4_hba.num_present_cpu) {
8577 cpup = phba->sli4_hba.cpu_map;
8578 cpu = 0;
8579 } else {
8580 cpup++;
8581 }
8582 }
8583 }
8584
8585 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8586 cpup = phba->sli4_hba.cpu_map;
8587 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8588 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8589 cpup->channel_id = 0;
8590 num_io_channel++;
8591
8592 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8593 "3332 Assign IO_CHANN "
8594 "CPU %d channel %d\n",
8595 idx, cpup->channel_id);
8596 }
8597 cpup++;
8598 }
8599 }
8600
8601 /* Sanity check */
8602 if (num_io_channel != phba->sli4_hba.num_present_cpu)
8603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8604 "3333 Set affinity mismatch:"
8605 "%d chann != %d cpus: %d vactors\n",
8606 num_io_channel, phba->sli4_hba.num_present_cpu,
8607 vectors);
8608
8609 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8610 return 1;
8611}
8612
8613
8614/**
8212 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 8615 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8213 * @phba: pointer to lpfc hba data structure. 8616 * @phba: pointer to lpfc hba data structure.
8214 * 8617 *
@@ -8259,9 +8662,7 @@ enable_msix_vectors:
8259 phba->sli4_hba.msix_entries[index].vector, 8662 phba->sli4_hba.msix_entries[index].vector,
8260 phba->sli4_hba.msix_entries[index].entry); 8663 phba->sli4_hba.msix_entries[index].entry);
8261 8664
8262 /* 8665 /* Assign MSI-X vectors to interrupt handlers */
8263 * Assign MSI-X vectors to interrupt handlers
8264 */
8265 for (index = 0; index < vectors; index++) { 8666 for (index = 0; index < vectors; index++) {
8266 memset(&phba->sli4_hba.handler_name[index], 0, 16); 8667 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8267 sprintf((char *)&phba->sli4_hba.handler_name[index], 8668 sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@ enable_msix_vectors:
8289 phba->cfg_fcp_io_channel, vectors); 8690 phba->cfg_fcp_io_channel, vectors);
8290 phba->cfg_fcp_io_channel = vectors; 8691 phba->cfg_fcp_io_channel = vectors;
8291 } 8692 }
8693
8694 lpfc_sli4_set_affinity(phba, vectors);
8292 return rc; 8695 return rc;
8293 8696
8294cfg_fail_out: 8697cfg_fail_out:
@@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9213 /* Block all SCSI devices' I/Os on the host */ 9616 /* Block all SCSI devices' I/Os on the host */
9214 lpfc_scsi_dev_block(phba); 9617 lpfc_scsi_dev_block(phba);
9215 9618
9619 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9620 lpfc_sli_flush_fcp_rings(phba);
9621
9216 /* stop all timers */ 9622 /* stop all timers */
9217 lpfc_stop_hba_timers(phba); 9623 lpfc_stop_hba_timers(phba);
9218 9624
9219 /* Disable interrupt and pci device */ 9625 /* Disable interrupt and pci device */
9220 lpfc_sli_disable_intr(phba); 9626 lpfc_sli_disable_intr(phba);
9221 pci_disable_device(phba->pcidev); 9627 pci_disable_device(phba->pcidev);
9222
9223 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9224 lpfc_sli_flush_fcp_rings(phba);
9225} 9628}
9226 9629
9227/** 9630/**
@@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9966 /* Block all SCSI devices' I/Os on the host */ 10369 /* Block all SCSI devices' I/Os on the host */
9967 lpfc_scsi_dev_block(phba); 10370 lpfc_scsi_dev_block(phba);
9968 10371
10372 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10373 lpfc_sli_flush_fcp_rings(phba);
10374
9969 /* stop all timers */ 10375 /* stop all timers */
9970 lpfc_stop_hba_timers(phba); 10376 lpfc_stop_hba_timers(phba);
9971 10377
@@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9973 lpfc_sli4_disable_intr(phba); 10379 lpfc_sli4_disable_intr(phba);
9974 lpfc_sli4_queue_destroy(phba); 10380 lpfc_sli4_queue_destroy(phba);
9975 pci_disable_device(phba->pcidev); 10381 pci_disable_device(phba->pcidev);
9976
9977 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9978 lpfc_sli_flush_fcp_rings(phba);
9979} 10382}
9980 10383
9981/** 10384/**
@@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
10535static int __init 10938static int __init
10536lpfc_init(void) 10939lpfc_init(void)
10537{ 10940{
10941 int cpu;
10538 int error = 0; 10942 int error = 0;
10539 10943
10540 printk(LPFC_MODULE_DESC "\n"); 10944 printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@ lpfc_init(void)
10561 return -ENOMEM; 10965 return -ENOMEM;
10562 } 10966 }
10563 } 10967 }
10968
10969 /* Initialize in case vector mapping is needed */
10970 for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
10971 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
10972
10564 error = pci_register_driver(&lpfc_driver); 10973 error = pci_register_driver(&lpfc_driver);
10565 if (error) { 10974 if (error) {
10566 fc_release_transport(lpfc_transport_template); 10975 fc_release_transport(lpfc_transport_template);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index baf53e6c2bd1..2a4e5d21eab2 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -37,6 +37,7 @@
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */ 38#define LOG_FIP 0x00020000 /* FIP events */
39#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ 39#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
40#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
40#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 41#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
41 42
42#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 43#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a7a9fa468308..41363db7d426 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2149 2149
2150 /* Only FC supports upd bit */ 2150 /* Only FC supports upd bit */
2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && 2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
2152 (vport->fc_flag & FC_VFI_REGISTERED)) { 2152 (vport->fc_flag & FC_VFI_REGISTERED) &&
2153 (!phba->fc_topology_changed)) {
2153 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); 2154 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
2154 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); 2155 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
2155 } 2156 }
2156 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, 2157 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
2157 "3134 Register VFI, mydid:x%x, fcfi:%d, " 2158 "3134 Register VFI, mydid:x%x, fcfi:%d, "
2158 " vfi:%d, vpi:%d, fc_pname:%x%x\n", 2159 " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
2160 " port_state:x%x topology chg:%d\n",
2159 vport->fc_myDID, 2161 vport->fc_myDID,
2160 phba->fcf.fcfi, 2162 phba->fcf.fcfi,
2161 phba->sli4_hba.vfi_ids[vport->vfi], 2163 phba->sli4_hba.vfi_ids[vport->vfi],
2162 phba->vpi_ids[vport->vpi], 2164 phba->vpi_ids[vport->vpi],
2163 reg_vfi->wwn[0], reg_vfi->wwn[1]); 2165 reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
2166 vport->port_state, phba->fc_topology_changed);
2164} 2167}
2165 2168
2166/** 2169/**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cd86069a0ba8..812d0cd7c86d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 64 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
65 int i; 65 int i;
66 66
67 if (phba->sli_rev == LPFC_SLI_REV4) 67 if (phba->sli_rev == LPFC_SLI_REV4) {
68 /* Calculate alignment */
69 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
70 i = phba->cfg_sg_dma_buf_size;
71 else
72 i = SLI4_PAGE_SIZE;
73
68 phba->lpfc_scsi_dma_buf_pool = 74 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool", 75 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev, 76 phba->pcidev,
71 phba->cfg_sg_dma_buf_size, 77 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size, 78 i,
73 0); 79 0);
74 else 80 } else {
75 phba->lpfc_scsi_dma_buf_pool = 81 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool", 82 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size, 83 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0); 84 align, 0);
85 }
86
79 if (!phba->lpfc_scsi_dma_buf_pool) 87 if (!phba->lpfc_scsi_dma_buf_pool)
80 goto fail; 88 goto fail;
81 89
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 82f4d3542289..31e9b92f5a9b 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
332 332
333 /* PLOGI chkparm OK */ 333 /* PLOGI chkparm OK */
334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 334 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
335 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", 335 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
336 "x%x x%x x%x\n",
336 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 337 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
337 ndlp->nlp_rpi); 338 ndlp->nlp_rpi, vport->port_state,
339 vport->fc_flag);
338 340
339 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) 341 if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
340 ndlp->nlp_fcp_info |= CLASS2; 342 ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@ out:
574 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 576 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
575 577
576 /* 1 sec timeout */ 578 /* 1 sec timeout */
577 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 579 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
578 580
579 spin_lock_irq(shost->host_lock); 581 spin_lock_irq(shost->host_lock);
580 ndlp->nlp_flag |= NLP_DELAY_TMO; 582 ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
631 * If there are other active VLinks present, 633 * If there are other active VLinks present,
632 * re-instantiate the Vlink using FDISC. 634 * re-instantiate the Vlink using FDISC.
633 */ 635 */
634 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 636 mod_timer(&ndlp->nlp_delayfunc,
637 jiffies + msecs_to_jiffies(1000));
635 spin_lock_irq(shost->host_lock); 638 spin_lock_irq(shost->host_lock);
636 ndlp->nlp_flag |= NLP_DELAY_TMO; 639 ndlp->nlp_flag |= NLP_DELAY_TMO;
637 spin_unlock_irq(shost->host_lock); 640 spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
648 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 651 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
649 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 652 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
650 /* Only try to re-login if this is NOT a Fabric Node */ 653 /* Only try to re-login if this is NOT a Fabric Node */
651 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 654 mod_timer(&ndlp->nlp_delayfunc,
655 jiffies + msecs_to_jiffies(1000 * 1));
652 spin_lock_irq(shost->host_lock); 656 spin_lock_irq(shost->host_lock);
653 ndlp->nlp_flag |= NLP_DELAY_TMO; 657 ndlp->nlp_flag |= NLP_DELAY_TMO;
654 spin_unlock_irq(shost->host_lock); 658 spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
969 } 973 }
970 974
971 /* Put ndlp in npr state set plogi timer for 1 sec */ 975 /* Put ndlp in npr state set plogi timer for 1 sec */
972 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 976 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
973 spin_lock_irq(shost->host_lock); 977 spin_lock_irq(shost->host_lock);
974 ndlp->nlp_flag |= NLP_DELAY_TMO; 978 ndlp->nlp_flag |= NLP_DELAY_TMO;
975 spin_unlock_irq(shost->host_lock); 979 spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1303 if ((irsp->ulpStatus) || 1307 if ((irsp->ulpStatus) ||
1304 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { 1308 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1305 /* 1 sec timeout */ 1309 /* 1 sec timeout */
1306 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1310 mod_timer(&ndlp->nlp_delayfunc,
1311 jiffies + msecs_to_jiffies(1000));
1307 spin_lock_irq(shost->host_lock); 1312 spin_lock_irq(shost->host_lock);
1308 ndlp->nlp_flag |= NLP_DELAY_TMO; 1313 ndlp->nlp_flag |= NLP_DELAY_TMO;
1309 spin_unlock_irq(shost->host_lock); 1314 spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1509 } 1514 }
1510 1515
1511 /* Put ndlp in npr state set plogi timer for 1 sec */ 1516 /* Put ndlp in npr state set plogi timer for 1 sec */
1512 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1517 mod_timer(&ndlp->nlp_delayfunc,
1518 jiffies + msecs_to_jiffies(1000 * 1));
1513 spin_lock_irq(shost->host_lock); 1519 spin_lock_irq(shost->host_lock);
1514 ndlp->nlp_flag |= NLP_DELAY_TMO; 1520 ndlp->nlp_flag |= NLP_DELAY_TMO;
1515 spin_unlock_irq(shost->host_lock); 1521 spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2145 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 2151 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2146 2152
2147 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { 2153 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2148 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 2154 mod_timer(&ndlp->nlp_delayfunc,
2155 jiffies + msecs_to_jiffies(1000 * 1));
2149 spin_lock_irq(shost->host_lock); 2156 spin_lock_irq(shost->host_lock);
2150 ndlp->nlp_flag |= NLP_DELAY_TMO; 2157 ndlp->nlp_flag |= NLP_DELAY_TMO;
2151 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2158 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 74b8710e1e90..8523b278ec9d 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -24,6 +24,8 @@
24#include <linux/export.h> 24#include <linux/export.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27#include <linux/crc-t10dif.h>
28#include <net/checksum.h>
27 29
28#include <scsi/scsi.h> 30#include <scsi/scsi.h>
29#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
48#define LPFC_RESET_WAIT 2 50#define LPFC_RESET_WAIT 2
49#define LPFC_ABORT_WAIT 2 51#define LPFC_ABORT_WAIT 2
50 52
51int _dump_buf_done; 53int _dump_buf_done = 1;
52 54
53static char *dif_op_str[] = { 55static char *dif_op_str[] = {
54 "PROT_NORMAL", 56 "PROT_NORMAL",
@@ -66,6 +68,10 @@ struct scsi_dif_tuple {
66 __be32 ref_tag; /* Target LBA or indirect LBA */ 68 __be32 ref_tag; /* Target LBA or indirect LBA */
67}; 69};
68 70
71#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
72#define scsi_prot_flagged(sc, flg) sc
73#endif
74
69static void 75static void
70lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 76lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
71static void 77static void
@@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
534 dma_addr_t pdma_phys_fcp_rsp; 540 dma_addr_t pdma_phys_fcp_rsp;
535 dma_addr_t pdma_phys_bpl; 541 dma_addr_t pdma_phys_bpl;
536 uint16_t iotag; 542 uint16_t iotag;
537 int bcnt; 543 int bcnt, bpl_size;
544
545 bpl_size = phba->cfg_sg_dma_buf_size -
546 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
547
548 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
549 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
550 num_to_alloc, phba->cfg_sg_dma_buf_size,
551 (int)sizeof(struct fcp_cmnd),
552 (int)sizeof(struct fcp_rsp), bpl_size);
538 553
539 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 554 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
540 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 555 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
759 struct list_head *post_sblist, int sb_count) 774 struct list_head *post_sblist, int sb_count)
760{ 775{
761 struct lpfc_scsi_buf *psb, *psb_next; 776 struct lpfc_scsi_buf *psb, *psb_next;
762 int status; 777 int status, sgl_size;
763 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 778 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
764 dma_addr_t pdma_phys_bpl1; 779 dma_addr_t pdma_phys_bpl1;
765 int last_xritag = NO_XRI; 780 int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
771 if (sb_count <= 0) 786 if (sb_count <= 0)
772 return -EINVAL; 787 return -EINVAL;
773 788
789 sgl_size = phba->cfg_sg_dma_buf_size -
790 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
791
774 list_for_each_entry_safe(psb, psb_next, post_sblist, list) { 792 list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
775 list_del_init(&psb->list); 793 list_del_init(&psb->list);
776 block_cnt++; 794 block_cnt++;
@@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
803 post_cnt = block_cnt; 821 post_cnt = block_cnt;
804 } else if (block_cnt == 1) { 822 } else if (block_cnt == 1) {
805 /* last single sgl with non-contiguous xri */ 823 /* last single sgl with non-contiguous xri */
806 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 824 if (sgl_size > SGL_PAGE_SIZE)
807 pdma_phys_bpl1 = psb->dma_phys_bpl + 825 pdma_phys_bpl1 = psb->dma_phys_bpl +
808 SGL_PAGE_SIZE; 826 SGL_PAGE_SIZE;
809 else 827 else
@@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
885 int num_posted, rc = 0; 903 int num_posted, rc = 0;
886 904
887 /* get all SCSI buffers need to repost to a local list */ 905 /* get all SCSI buffers need to repost to a local list */
888 spin_lock_irq(&phba->scsi_buf_list_lock); 906 spin_lock_irq(&phba->scsi_buf_list_get_lock);
889 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); 907 spin_lock_irq(&phba->scsi_buf_list_put_lock);
890 spin_unlock_irq(&phba->scsi_buf_list_lock); 908 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
909 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
910 spin_unlock_irq(&phba->scsi_buf_list_put_lock);
911 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
891 912
892 /* post the list of scsi buffer sgls to port if available */ 913 /* post the list of scsi buffer sgls to port if available */
893 if (!list_empty(&post_sblist)) { 914 if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
923 IOCB_t *iocb; 944 IOCB_t *iocb;
924 dma_addr_t pdma_phys_fcp_cmd; 945 dma_addr_t pdma_phys_fcp_cmd;
925 dma_addr_t pdma_phys_fcp_rsp; 946 dma_addr_t pdma_phys_fcp_rsp;
926 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 947 dma_addr_t pdma_phys_bpl;
927 uint16_t iotag, lxri = 0; 948 uint16_t iotag, lxri = 0;
928 int bcnt, num_posted; 949 int bcnt, num_posted, sgl_size;
929 LIST_HEAD(prep_sblist); 950 LIST_HEAD(prep_sblist);
930 LIST_HEAD(post_sblist); 951 LIST_HEAD(post_sblist);
931 LIST_HEAD(scsi_sblist); 952 LIST_HEAD(scsi_sblist);
932 953
954 sgl_size = phba->cfg_sg_dma_buf_size -
955 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
956
957 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
958 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
959 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
960 (int)sizeof(struct fcp_cmnd),
961 (int)sizeof(struct fcp_rsp));
962
933 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 963 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
934 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 964 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
935 if (!psb) 965 if (!psb)
@@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
948 } 978 }
949 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 979 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
950 980
981 /* Page alignment is CRITICAL, double check to be sure */
982 if (((unsigned long)(psb->data) &
983 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
984 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
985 psb->data, psb->dma_handle);
986 kfree(psb);
987 break;
988 }
989
951 /* Allocate iotag for psb->cur_iocbq. */ 990 /* Allocate iotag for psb->cur_iocbq. */
952 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 991 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
953 if (iotag == 0) { 992 if (iotag == 0) {
@@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
968 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 1007 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
969 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 1008 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
970 psb->fcp_bpl = psb->data; 1009 psb->fcp_bpl = psb->data;
971 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) 1010 psb->fcp_cmnd = (psb->data + sgl_size);
972 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
973 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + 1011 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
974 sizeof(struct fcp_cmnd)); 1012 sizeof(struct fcp_cmnd));
975 1013
976 /* Initialize local short-hand pointers. */ 1014 /* Initialize local short-hand pointers. */
977 sgl = (struct sli4_sge *)psb->fcp_bpl; 1015 sgl = (struct sli4_sge *)psb->fcp_bpl;
978 pdma_phys_bpl = psb->dma_handle; 1016 pdma_phys_bpl = psb->dma_handle;
979 pdma_phys_fcp_cmd = 1017 pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
980 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
981 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
982 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 1018 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
983 1019
984 /* 1020 /*
@@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
1020 iocb->ulpLe = 1; 1056 iocb->ulpLe = 1;
1021 iocb->ulpClass = CLASS3; 1057 iocb->ulpClass = CLASS3;
1022 psb->cur_iocbq.context1 = psb; 1058 psb->cur_iocbq.context1 = psb;
1023 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1024 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
1025 else
1026 pdma_phys_bpl1 = 0;
1027 psb->dma_phys_bpl = pdma_phys_bpl; 1059 psb->dma_phys_bpl = pdma_phys_bpl;
1028 1060
1029 /* add the scsi buffer to a post list */ 1061 /* add the scsi buffer to a post list */
1030 list_add_tail(&psb->list, &post_sblist); 1062 list_add_tail(&psb->list, &post_sblist);
1031 spin_lock_irq(&phba->scsi_buf_list_lock); 1063 spin_lock_irq(&phba->scsi_buf_list_get_lock);
1032 phba->sli4_hba.scsi_xri_cnt++; 1064 phba->sli4_hba.scsi_xri_cnt++;
1033 spin_unlock_irq(&phba->scsi_buf_list_lock); 1065 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
1034 } 1066 }
1035 lpfc_printf_log(phba, KERN_INFO, LOG_BG, 1067 lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1036 "3021 Allocate %d out of %d requested new SCSI " 1068 "3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf*
1079lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1111lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1080{ 1112{
1081 struct lpfc_scsi_buf * lpfc_cmd = NULL; 1113 struct lpfc_scsi_buf * lpfc_cmd = NULL;
1082 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 1114 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
1083 unsigned long iflag = 0; 1115 unsigned long gflag = 0;
1084 1116 unsigned long pflag = 0;
1085 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1117
1086 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 1118 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1087 if (lpfc_cmd) { 1119 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
1088 lpfc_cmd->seg_cnt = 0; 1120 list);
1089 lpfc_cmd->nonsg_phys = 0; 1121 if (!lpfc_cmd) {
1090 lpfc_cmd->prot_seg_cnt = 0; 1122 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1123 list_splice(&phba->lpfc_scsi_buf_list_put,
1124 &phba->lpfc_scsi_buf_list_get);
1125 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1126 list_remove_head(scsi_buf_list_get, lpfc_cmd,
1127 struct lpfc_scsi_buf, list);
1128 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1091 } 1129 }
1092 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1130 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1093 return lpfc_cmd; 1131 return lpfc_cmd;
1094} 1132}
1095/** 1133/**
@@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf*
1107lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1145lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1108{ 1146{
1109 struct lpfc_scsi_buf *lpfc_cmd ; 1147 struct lpfc_scsi_buf *lpfc_cmd ;
1110 unsigned long iflag = 0; 1148 unsigned long gflag = 0;
1149 unsigned long pflag = 0;
1111 int found = 0; 1150 int found = 0;
1112 1151
1113 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1152 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1114 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, 1153 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
1115 list) {
1116 if (lpfc_test_rrq_active(phba, ndlp, 1154 if (lpfc_test_rrq_active(phba, ndlp,
1117 lpfc_cmd->cur_iocbq.sli4_lxritag)) 1155 lpfc_cmd->cur_iocbq.sli4_lxritag))
1118 continue; 1156 continue;
1119 list_del(&lpfc_cmd->list); 1157 list_del(&lpfc_cmd->list);
1120 found = 1; 1158 found = 1;
1121 lpfc_cmd->seg_cnt = 0;
1122 lpfc_cmd->nonsg_phys = 0;
1123 lpfc_cmd->prot_seg_cnt = 0;
1124 break; 1159 break;
1125 } 1160 }
1126 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, 1161 if (!found) {
1127 iflag); 1162 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1163 list_splice(&phba->lpfc_scsi_buf_list_put,
1164 &phba->lpfc_scsi_buf_list_get);
1165 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1166 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1167 list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
1168 list) {
1169 if (lpfc_test_rrq_active(
1170 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1171 continue;
1172 list_del(&lpfc_cmd->list);
1173 found = 1;
1174 break;
1175 }
1176 }
1177 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1128 if (!found) 1178 if (!found)
1129 return NULL; 1179 return NULL;
1130 else 1180 return lpfc_cmd;
1131 return lpfc_cmd;
1132} 1181}
1133/** 1182/**
1134 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 1183 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1160{ 1209{
1161 unsigned long iflag = 0; 1210 unsigned long iflag = 0;
1162 1211
1163 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1212 psb->seg_cnt = 0;
1213 psb->nonsg_phys = 0;
1214 psb->prot_seg_cnt = 0;
1215
1216 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1164 psb->pCmd = NULL; 1217 psb->pCmd = NULL;
1165 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1218 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1166 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1219 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1220 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1167} 1221}
1168 1222
1169/** 1223/**
@@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1181{ 1235{
1182 unsigned long iflag = 0; 1236 unsigned long iflag = 0;
1183 1237
1238 psb->seg_cnt = 0;
1239 psb->nonsg_phys = 0;
1240 psb->prot_seg_cnt = 0;
1241
1184 if (psb->exch_busy) { 1242 if (psb->exch_busy) {
1185 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 1243 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1186 iflag); 1244 iflag);
@@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1190 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 1248 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1191 iflag); 1249 iflag);
1192 } else { 1250 } else {
1193
1194 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1195 psb->pCmd = NULL; 1251 psb->pCmd = NULL;
1196 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1252 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1197 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1253 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1254 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1255 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1198 } 1256 }
1199} 1257}
1200 1258
@@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1268 "dma_map_sg. Config %d, seg_cnt %d\n", 1326 "dma_map_sg. Config %d, seg_cnt %d\n",
1269 __func__, phba->cfg_sg_seg_cnt, 1327 __func__, phba->cfg_sg_seg_cnt,
1270 lpfc_cmd->seg_cnt); 1328 lpfc_cmd->seg_cnt);
1329 lpfc_cmd->seg_cnt = 0;
1271 scsi_dma_unmap(scsi_cmnd); 1330 scsi_dma_unmap(scsi_cmnd);
1272 return 1; 1331 return 1;
1273 } 1332 }
@@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2013 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 2072 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2014 bf_set(pde6_optx, pde6, txop); 2073 bf_set(pde6_optx, pde6, txop);
2015 bf_set(pde6_oprx, pde6, rxop); 2074 bf_set(pde6_oprx, pde6, rxop);
2075
2076 /*
2077 * We only need to check the data on READs, for WRITEs
2078 * protection data is automatically generated, not checked.
2079 */
2016 if (datadir == DMA_FROM_DEVICE) { 2080 if (datadir == DMA_FROM_DEVICE) {
2017 bf_set(pde6_ce, pde6, checking); 2081 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2018 bf_set(pde6_re, pde6, checking); 2082 bf_set(pde6_ce, pde6, checking);
2083 else
2084 bf_set(pde6_ce, pde6, 0);
2085
2086 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2087 bf_set(pde6_re, pde6, checking);
2088 else
2089 bf_set(pde6_re, pde6, 0);
2019 } 2090 }
2020 bf_set(pde6_ai, pde6, 1); 2091 bf_set(pde6_ai, pde6, 1);
2021 bf_set(pde6_ae, pde6, 0); 2092 bf_set(pde6_ae, pde6, 0);
@@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2145 2216
2146 split_offset = 0; 2217 split_offset = 0;
2147 do { 2218 do {
2219 /* Check to see if we ran out of space */
2220 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
2221 return num_bde + 3;
2222
2148 /* setup PDE5 with what we have */ 2223 /* setup PDE5 with what we have */
2149 pde5 = (struct lpfc_pde5 *) bpl; 2224 pde5 = (struct lpfc_pde5 *) bpl;
2150 memset(pde5, 0, sizeof(struct lpfc_pde5)); 2225 memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2164 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 2239 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2165 bf_set(pde6_optx, pde6, txop); 2240 bf_set(pde6_optx, pde6, txop);
2166 bf_set(pde6_oprx, pde6, rxop); 2241 bf_set(pde6_oprx, pde6, rxop);
2167 bf_set(pde6_ce, pde6, checking); 2242
2168 bf_set(pde6_re, pde6, checking); 2243 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2244 bf_set(pde6_ce, pde6, checking);
2245 else
2246 bf_set(pde6_ce, pde6, 0);
2247
2248 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2249 bf_set(pde6_re, pde6, checking);
2250 else
2251 bf_set(pde6_re, pde6, 0);
2252
2169 bf_set(pde6_ai, pde6, 1); 2253 bf_set(pde6_ai, pde6, 1);
2170 bf_set(pde6_ae, pde6, 0); 2254 bf_set(pde6_ae, pde6, 0);
2171 bf_set(pde6_apptagval, pde6, 0); 2255 bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2213 pgdone = 0; 2297 pgdone = 0;
2214 subtotal = 0; /* total bytes processed for current prot grp */ 2298 subtotal = 0; /* total bytes processed for current prot grp */
2215 while (!pgdone) { 2299 while (!pgdone) {
2300 /* Check to see if we ran out of space */
2301 if (num_bde >= phba->cfg_total_seg_cnt)
2302 return num_bde + 1;
2303
2216 if (!sgde) { 2304 if (!sgde) {
2217 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2305 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2218 "9065 BLKGRD:%s Invalid data segment\n", 2306 "9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2324 struct sli4_sge_diseed *diseed = NULL; 2412 struct sli4_sge_diseed *diseed = NULL;
2325 dma_addr_t physaddr; 2413 dma_addr_t physaddr;
2326 int i = 0, num_sge = 0, status; 2414 int i = 0, num_sge = 0, status;
2327 int datadir = sc->sc_data_direction;
2328 uint32_t reftag; 2415 uint32_t reftag;
2329 unsigned blksize; 2416 unsigned blksize;
2330 uint8_t txop, rxop; 2417 uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2362 diseed->ref_tag = cpu_to_le32(reftag); 2449 diseed->ref_tag = cpu_to_le32(reftag);
2363 diseed->ref_tag_tran = diseed->ref_tag; 2450 diseed->ref_tag_tran = diseed->ref_tag;
2364 2451
2452 /*
2453 * We only need to check the data on READs, for WRITEs
2454 * protection data is automatically generated, not checked.
2455 */
2456 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2457 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2458 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2459 else
2460 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2461
2462 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2463 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2464 else
2465 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2466 }
2467
2365 /* setup DISEED with the rest of the info */ 2468 /* setup DISEED with the rest of the info */
2366 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2469 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2367 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2470 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2368 if (datadir == DMA_FROM_DEVICE) { 2471
2369 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2370 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2371 }
2372 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2472 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2373 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2473 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2374 2474
@@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2497 2597
2498 split_offset = 0; 2598 split_offset = 0;
2499 do { 2599 do {
2600 /* Check to see if we ran out of space */
2601 if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2602 return num_sge + 3;
2603
2500 /* setup DISEED with what we have */ 2604 /* setup DISEED with what we have */
2501 diseed = (struct sli4_sge_diseed *) sgl; 2605 diseed = (struct sli4_sge_diseed *) sgl;
2502 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2606 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2506 diseed->ref_tag = cpu_to_le32(reftag); 2610 diseed->ref_tag = cpu_to_le32(reftag);
2507 diseed->ref_tag_tran = diseed->ref_tag; 2611 diseed->ref_tag_tran = diseed->ref_tag;
2508 2612
2613 if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
2614 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2615
2616 } else {
2617 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2618 /*
2619 * When in this mode, the hardware will replace
2620 * the guard tag from the host with a
2621 * newly generated good CRC for the wire.
2622 * Switch to raw mode here to avoid this
2623 * behavior. What the host sends gets put on the wire.
2624 */
2625 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2626 txop = BG_OP_RAW_MODE;
2627 rxop = BG_OP_RAW_MODE;
2628 }
2629 }
2630
2631
2632 if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2633 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2634 else
2635 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2636
2509 /* setup DISEED with the rest of the info */ 2637 /* setup DISEED with the rest of the info */
2510 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2638 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2511 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2639 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2512 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2640
2513 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2514 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2641 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2515 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2642 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2516 2643
@@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2556 pgdone = 0; 2683 pgdone = 0;
2557 subtotal = 0; /* total bytes processed for current prot grp */ 2684 subtotal = 0; /* total bytes processed for current prot grp */
2558 while (!pgdone) { 2685 while (!pgdone) {
2686 /* Check to see if we ran out of space */
2687 if (num_sge >= phba->cfg_total_seg_cnt)
2688 return num_sge + 1;
2689
2559 if (!sgde) { 2690 if (!sgde) {
2560 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2691 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2561 "9086 BLKGRD:%s Invalid data segment\n", 2692 "9086 BLKGRD:%s Invalid data segment\n",
@@ -2670,6 +2801,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2670} 2801}
2671 2802
2672/** 2803/**
2804 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2805 * @phba: The Hba for which this call is being executed.
2806 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2807 *
2808 * Adjust the data length to account for how much data
2809 * is actually on the wire.
2810 *
2811 * returns the adjusted data length
2812 **/
2813static int
2814lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2815 struct lpfc_scsi_buf *lpfc_cmd)
2816{
2817 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2818 int fcpdl;
2819
2820 fcpdl = scsi_bufflen(sc);
2821
2822 /* Check if there is protection data on the wire */
2823 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2824 /* Read */
2825 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2826 return fcpdl;
2827
2828 } else {
2829 /* Write */
2830 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2831 return fcpdl;
2832 }
2833
2834 /*
2835 * If we are in DIF Type 1 mode every data block has a 8 byte
2836 * DIF (trailer) attached to it. Must ajust FCP data length.
2837 */
2838 if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
2839 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2840
2841 return fcpdl;
2842}
2843
2844/**
2673 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 2845 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2674 * @phba: The Hba for which this call is being executed. 2846 * @phba: The Hba for which this call is being executed.
2675 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 2847 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
@@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2689 uint32_t num_bde = 0; 2861 uint32_t num_bde = 0;
2690 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2862 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2691 int prot_group_type = 0; 2863 int prot_group_type = 0;
2692 int diflen, fcpdl; 2864 int fcpdl;
2693 unsigned blksize;
2694 2865
2695 /* 2866 /*
2696 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 2867 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2711 return 1; 2882 return 1;
2712 2883
2713 lpfc_cmd->seg_cnt = datasegcnt; 2884 lpfc_cmd->seg_cnt = datasegcnt;
2714 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2885
2715 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2886 /* First check if data segment count from SCSI Layer is good */
2716 "9067 BLKGRD: %s: Too many sg segments" 2887 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2717 " from dma_map_sg. Config %d, seg_cnt" 2888 goto err;
2718 " %d\n",
2719 __func__, phba->cfg_sg_seg_cnt,
2720 lpfc_cmd->seg_cnt);
2721 scsi_dma_unmap(scsi_cmnd);
2722 return 1;
2723 }
2724 2889
2725 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2890 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2726 2891
2727 switch (prot_group_type) { 2892 switch (prot_group_type) {
2728 case LPFC_PG_TYPE_NO_DIF: 2893 case LPFC_PG_TYPE_NO_DIF:
2894
2895 /* Here we need to add a PDE5 and PDE6 to the count */
2896 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2897 goto err;
2898
2729 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2899 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2730 datasegcnt); 2900 datasegcnt);
2731 /* we should have 2 or more entries in buffer list */ 2901 /* we should have 2 or more entries in buffer list */
2732 if (num_bde < 2) 2902 if (num_bde < 2)
2733 goto err; 2903 goto err;
2734 break; 2904 break;
2735 case LPFC_PG_TYPE_DIF_BUF:{ 2905
2906 case LPFC_PG_TYPE_DIF_BUF:
2736 /* 2907 /*
2737 * This type indicates that protection buffers are 2908 * This type indicates that protection buffers are
2738 * passed to the driver, so that needs to be prepared 2909 * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2747 } 2918 }
2748 2919
2749 lpfc_cmd->prot_seg_cnt = protsegcnt; 2920 lpfc_cmd->prot_seg_cnt = protsegcnt;
2750 if (lpfc_cmd->prot_seg_cnt 2921
2751 > phba->cfg_prot_sg_seg_cnt) { 2922 /*
2752 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2923 * There is a minimun of 4 BPLs used for every
2753 "9068 BLKGRD: %s: Too many prot sg " 2924 * protection data segment.
2754 "segments from dma_map_sg. Config %d," 2925 */
2755 "prot_seg_cnt %d\n", __func__, 2926 if ((lpfc_cmd->prot_seg_cnt * 4) >
2756 phba->cfg_prot_sg_seg_cnt, 2927 (phba->cfg_total_seg_cnt - 2))
2757 lpfc_cmd->prot_seg_cnt); 2928 goto err;
2758 dma_unmap_sg(&phba->pcidev->dev,
2759 scsi_prot_sglist(scsi_cmnd),
2760 scsi_prot_sg_count(scsi_cmnd),
2761 datadir);
2762 scsi_dma_unmap(scsi_cmnd);
2763 return 1;
2764 }
2765 2929
2766 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2930 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2767 datasegcnt, protsegcnt); 2931 datasegcnt, protsegcnt);
2768 /* we should have 3 or more entries in buffer list */ 2932 /* we should have 3 or more entries in buffer list */
2769 if (num_bde < 3) 2933 if ((num_bde < 3) ||
2934 (num_bde > phba->cfg_total_seg_cnt))
2770 goto err; 2935 goto err;
2771 break; 2936 break;
2772 } 2937
2773 case LPFC_PG_TYPE_INVALID: 2938 case LPFC_PG_TYPE_INVALID:
2774 default: 2939 default:
2940 scsi_dma_unmap(scsi_cmnd);
2941 lpfc_cmd->seg_cnt = 0;
2942
2775 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2943 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2776 "9022 Unexpected protection group %i\n", 2944 "9022 Unexpected protection group %i\n",
2777 prot_group_type); 2945 prot_group_type);
@@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2790 iocb_cmd->ulpBdeCount = 1; 2958 iocb_cmd->ulpBdeCount = 1;
2791 iocb_cmd->ulpLe = 1; 2959 iocb_cmd->ulpLe = 1;
2792 2960
2793 fcpdl = scsi_bufflen(scsi_cmnd); 2961 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2794
2795 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2796 /*
2797 * We are in DIF Type 1 mode
2798 * Every data block has a 8 byte DIF (trailer)
2799 * attached to it. Must ajust FCP data length
2800 */
2801 blksize = lpfc_cmd_blksize(scsi_cmnd);
2802 diflen = (fcpdl / blksize) * 8;
2803 fcpdl += diflen;
2804 }
2805 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 2962 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2806 2963
2807 /* 2964 /*
@@ -2812,14 +2969,234 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2812 2969
2813 return 0; 2970 return 0;
2814err: 2971err:
2972 if (lpfc_cmd->seg_cnt)
2973 scsi_dma_unmap(scsi_cmnd);
2974 if (lpfc_cmd->prot_seg_cnt)
2975 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2976 scsi_prot_sg_count(scsi_cmnd),
2977 scsi_cmnd->sc_data_direction);
2978
2815 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2979 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2816 "9023 Could not setup all needed BDE's" 2980 "9023 Cannot setup S/G List for HBA"
2817 "prot_group_type=%d, num_bde=%d\n", 2981 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2982 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2983 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2818 prot_group_type, num_bde); 2984 prot_group_type, num_bde);
2985
2986 lpfc_cmd->seg_cnt = 0;
2987 lpfc_cmd->prot_seg_cnt = 0;
2819 return 1; 2988 return 1;
2820} 2989}
2821 2990
2822/* 2991/*
2992 * This function calcuates the T10 DIF guard tag
2993 * on the specified data using a CRC algorithmn
2994 * using crc_t10dif.
2995 */
2996uint16_t
2997lpfc_bg_crc(uint8_t *data, int count)
2998{
2999 uint16_t crc = 0;
3000 uint16_t x;
3001
3002 crc = crc_t10dif(data, count);
3003 x = cpu_to_be16(crc);
3004 return x;
3005}
3006
3007/*
3008 * This function calcuates the T10 DIF guard tag
3009 * on the specified data using a CSUM algorithmn
3010 * using ip_compute_csum.
3011 */
3012uint16_t
3013lpfc_bg_csum(uint8_t *data, int count)
3014{
3015 uint16_t ret;
3016
3017 ret = ip_compute_csum(data, count);
3018 return ret;
3019}
3020
3021/*
3022 * This function examines the protection data to try to determine
3023 * what type of T10-DIF error occurred.
3024 */
3025void
3026lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3027{
3028 struct scatterlist *sgpe; /* s/g prot entry */
3029 struct scatterlist *sgde; /* s/g data entry */
3030 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3031 struct scsi_dif_tuple *src = NULL;
3032 uint8_t *data_src = NULL;
3033 uint16_t guard_tag, guard_type;
3034 uint16_t start_app_tag, app_tag;
3035 uint32_t start_ref_tag, ref_tag;
3036 int prot, protsegcnt;
3037 int err_type, len, data_len;
3038 int chk_ref, chk_app, chk_guard;
3039 uint16_t sum;
3040 unsigned blksize;
3041
3042 err_type = BGS_GUARD_ERR_MASK;
3043 sum = 0;
3044 guard_tag = 0;
3045
3046 /* First check to see if there is protection data to examine */
3047 prot = scsi_get_prot_op(cmd);
3048 if ((prot == SCSI_PROT_READ_STRIP) ||
3049 (prot == SCSI_PROT_WRITE_INSERT) ||
3050 (prot == SCSI_PROT_NORMAL))
3051 goto out;
3052
3053 /* Currently the driver just supports ref_tag and guard_tag checking */
3054 chk_ref = 1;
3055 chk_app = 0;
3056 chk_guard = 0;
3057
3058 /* Setup a ptr to the protection data provided by the SCSI host */
3059 sgpe = scsi_prot_sglist(cmd);
3060 protsegcnt = lpfc_cmd->prot_seg_cnt;
3061
3062 if (sgpe && protsegcnt) {
3063
3064 /*
3065 * We will only try to verify guard tag if the segment
3066 * data length is a multiple of the blksize.
3067 */
3068 sgde = scsi_sglist(cmd);
3069 blksize = lpfc_cmd_blksize(cmd);
3070 data_src = (uint8_t *)sg_virt(sgde);
3071 data_len = sgde->length;
3072 if ((data_len & (blksize - 1)) == 0)
3073 chk_guard = 1;
3074 guard_type = scsi_host_get_guard(cmd->device->host);
3075
3076 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
3077 start_app_tag = src->app_tag;
3078 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3079 len = sgpe->length;
3080 while (src && protsegcnt) {
3081 while (len) {
3082
3083 /*
3084 * First check to see if a protection data
3085 * check is valid
3086 */
3087 if ((src->ref_tag == 0xffffffff) ||
3088 (src->app_tag == 0xffff)) {
3089 start_ref_tag++;
3090 goto skipit;
3091 }
3092
3093 /* App Tag checking */
3094 app_tag = src->app_tag;
3095 if (chk_app && (app_tag != start_app_tag)) {
3096 err_type = BGS_APPTAG_ERR_MASK;
3097 goto out;
3098 }
3099
3100 /* Reference Tag checking */
3101 ref_tag = be32_to_cpu(src->ref_tag);
3102 if (chk_ref && (ref_tag != start_ref_tag)) {
3103 err_type = BGS_REFTAG_ERR_MASK;
3104 goto out;
3105 }
3106 start_ref_tag++;
3107
3108 /* Guard Tag checking */
3109 if (chk_guard) {
3110 guard_tag = src->guard_tag;
3111 if (guard_type == SHOST_DIX_GUARD_IP)
3112 sum = lpfc_bg_csum(data_src,
3113 blksize);
3114 else
3115 sum = lpfc_bg_crc(data_src,
3116 blksize);
3117 if ((guard_tag != sum)) {
3118 err_type = BGS_GUARD_ERR_MASK;
3119 goto out;
3120 }
3121 }
3122skipit:
3123 len -= sizeof(struct scsi_dif_tuple);
3124 if (len < 0)
3125 len = 0;
3126 src++;
3127
3128 data_src += blksize;
3129 data_len -= blksize;
3130
3131 /*
3132 * Are we at the end of the Data segment?
3133 * The data segment is only used for Guard
3134 * tag checking.
3135 */
3136 if (chk_guard && (data_len == 0)) {
3137 chk_guard = 0;
3138 sgde = sg_next(sgde);
3139 if (!sgde)
3140 goto out;
3141
3142 data_src = (uint8_t *)sg_virt(sgde);
3143 data_len = sgde->length;
3144 if ((data_len & (blksize - 1)) == 0)
3145 chk_guard = 1;
3146 }
3147 }
3148
3149 /* Goto the next Protection data segment */
3150 sgpe = sg_next(sgpe);
3151 if (sgpe) {
3152 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3153 len = sgpe->length;
3154 } else {
3155 src = NULL;
3156 }
3157 protsegcnt--;
3158 }
3159 }
3160out:
3161 if (err_type == BGS_GUARD_ERR_MASK) {
3162 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3163 0x10, 0x1);
3164 cmd->result = DRIVER_SENSE << 24
3165 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3166 phba->bg_guard_err_cnt++;
3167 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3168 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3169 (unsigned long)scsi_get_lba(cmd),
3170 sum, guard_tag);
3171
3172 } else if (err_type == BGS_REFTAG_ERR_MASK) {
3173 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3174 0x10, 0x3);
3175 cmd->result = DRIVER_SENSE << 24
3176 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3177
3178 phba->bg_reftag_err_cnt++;
3179 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3180 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3181 (unsigned long)scsi_get_lba(cmd),
3182 ref_tag, start_ref_tag);
3183
3184 } else if (err_type == BGS_APPTAG_ERR_MASK) {
3185 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3186 0x10, 0x2);
3187 cmd->result = DRIVER_SENSE << 24
3188 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3189
3190 phba->bg_apptag_err_cnt++;
3191 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3192 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3193 (unsigned long)scsi_get_lba(cmd),
3194 app_tag, start_app_tag);
3195 }
3196}
3197
3198
3199/*
2823 * This function checks for BlockGuard errors detected by 3200 * This function checks for BlockGuard errors detected by
2824 * the HBA. In case of errors, the ASC/ASCQ fields in the 3201 * the HBA. In case of errors, the ASC/ASCQ fields in the
2825 * sense buffer will be set accordingly, paired with 3202 * sense buffer will be set accordingly, paired with
@@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2842 uint32_t bgstat = bgf->bgstat; 3219 uint32_t bgstat = bgf->bgstat;
2843 uint64_t failing_sector = 0; 3220 uint64_t failing_sector = 0;
2844 3221
2845 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
2846 " 0x%x lba 0x%llx blk cnt 0x%x "
2847 "bgstat=0x%x bghm=0x%x\n",
2848 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
2849 blk_rq_sectors(cmd->request), bgstat, bghm);
2850
2851 spin_lock(&_dump_buf_lock); 3222 spin_lock(&_dump_buf_lock);
2852 if (!_dump_buf_done) { 3223 if (!_dump_buf_done) {
2853 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" 3224 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2870 3241
2871 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3242 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2872 cmd->result = ScsiResult(DID_ERROR, 0); 3243 cmd->result = ScsiResult(DID_ERROR, 0);
2873 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" 3244 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2874 " BlockGuard profile. bgstat:0x%x\n", 3245 "9072 BLKGRD: Invalid BG Profile in cmd"
2875 bgstat); 3246 " 0x%x lba 0x%llx blk cnt 0x%x "
3247 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3248 (unsigned long long)scsi_get_lba(cmd),
3249 blk_rq_sectors(cmd->request), bgstat, bghm);
2876 ret = (-1); 3250 ret = (-1);
2877 goto out; 3251 goto out;
2878 } 3252 }
2879 3253
2880 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3254 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2881 cmd->result = ScsiResult(DID_ERROR, 0); 3255 cmd->result = ScsiResult(DID_ERROR, 0);
2882 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " 3256 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2883 "Invalid BlockGuard DIF Block. bgstat:0x%x\n", 3257 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2884 bgstat); 3258 " 0x%x lba 0x%llx blk cnt 0x%x "
3259 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3260 (unsigned long long)scsi_get_lba(cmd),
3261 blk_rq_sectors(cmd->request), bgstat, bghm);
2885 ret = (-1); 3262 ret = (-1);
2886 goto out; 3263 goto out;
2887 } 3264 }
@@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2894 cmd->result = DRIVER_SENSE << 24 3271 cmd->result = DRIVER_SENSE << 24
2895 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3272 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2896 phba->bg_guard_err_cnt++; 3273 phba->bg_guard_err_cnt++;
2897 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3274 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2898 "9055 BLKGRD: guard_tag error\n"); 3275 "9055 BLKGRD: Guard Tag error in cmd"
3276 " 0x%x lba 0x%llx blk cnt 0x%x "
3277 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3278 (unsigned long long)scsi_get_lba(cmd),
3279 blk_rq_sectors(cmd->request), bgstat, bghm);
2899 } 3280 }
2900 3281
2901 if (lpfc_bgs_get_reftag_err(bgstat)) { 3282 if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2907 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3288 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2908 3289
2909 phba->bg_reftag_err_cnt++; 3290 phba->bg_reftag_err_cnt++;
2910 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3291 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2911 "9056 BLKGRD: ref_tag error\n"); 3292 "9056 BLKGRD: Ref Tag error in cmd"
3293 " 0x%x lba 0x%llx blk cnt 0x%x "
3294 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3295 (unsigned long long)scsi_get_lba(cmd),
3296 blk_rq_sectors(cmd->request), bgstat, bghm);
2912 } 3297 }
2913 3298
2914 if (lpfc_bgs_get_apptag_err(bgstat)) { 3299 if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2920 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 3305 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2921 3306
2922 phba->bg_apptag_err_cnt++; 3307 phba->bg_apptag_err_cnt++;
2923 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3308 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2924 "9061 BLKGRD: app_tag error\n"); 3309 "9061 BLKGRD: App Tag error in cmd"
3310 " 0x%x lba 0x%llx blk cnt 0x%x "
3311 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3312 (unsigned long long)scsi_get_lba(cmd),
3313 blk_rq_sectors(cmd->request), bgstat, bghm);
2925 } 3314 }
2926 3315
2927 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3316 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2960 3349
2961 if (!ret) { 3350 if (!ret) {
2962 /* No error was reported - problem in FW? */ 3351 /* No error was reported - problem in FW? */
2963 cmd->result = ScsiResult(DID_ERROR, 0); 3352 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2964 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3353 "9057 BLKGRD: Unknown error in cmd"
2965 "9057 BLKGRD: Unknown error reported!\n"); 3354 " 0x%x lba 0x%llx blk cnt 0x%x "
3355 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3356 (unsigned long long)scsi_get_lba(cmd),
3357 blk_rq_sectors(cmd->request), bgstat, bghm);
3358
3359 /* Calcuate what type of error it was */
3360 lpfc_calc_bg_err(phba, lpfc_cmd);
2966 } 3361 }
2967
2968out: 3362out:
2969 return ret; 3363 return ret;
2970} 3364}
@@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3028 "dma_map_sg. Config %d, seg_cnt %d\n", 3422 "dma_map_sg. Config %d, seg_cnt %d\n",
3029 __func__, phba->cfg_sg_seg_cnt, 3423 __func__, phba->cfg_sg_seg_cnt,
3030 lpfc_cmd->seg_cnt); 3424 lpfc_cmd->seg_cnt);
3425 lpfc_cmd->seg_cnt = 0;
3031 scsi_dma_unmap(scsi_cmnd); 3426 scsi_dma_unmap(scsi_cmnd);
3032 return 1; 3427 return 1;
3033 } 3428 }
@@ -3094,45 +3489,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3094} 3489}
3095 3490
3096/** 3491/**
3097 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
3098 * @phba: The Hba for which this call is being executed.
3099 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
3100 *
3101 * Adjust the data length to account for how much data
3102 * is actually on the wire.
3103 *
3104 * returns the adjusted data length
3105 **/
3106static int
3107lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3108 struct lpfc_scsi_buf *lpfc_cmd)
3109{
3110 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3111 int diflen, fcpdl;
3112 unsigned blksize;
3113
3114 fcpdl = scsi_bufflen(sc);
3115
3116 /* Check if there is protection data on the wire */
3117 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3118 /* Read */
3119 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
3120 return fcpdl;
3121
3122 } else {
3123 /* Write */
3124 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
3125 return fcpdl;
3126 }
3127
3128 /* If protection data on the wire, adjust the count accordingly */
3129 blksize = lpfc_cmd_blksize(sc);
3130 diflen = (fcpdl / blksize) * 8;
3131 fcpdl += diflen;
3132 return fcpdl;
3133}
3134
3135/**
3136 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3492 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3137 * @phba: The Hba for which this call is being executed. 3493 * @phba: The Hba for which this call is being executed.
3138 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3494 * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3149 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3505 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3150 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); 3506 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3151 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 3507 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3152 uint32_t num_bde = 0; 3508 uint32_t num_sge = 0;
3153 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3509 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3154 int prot_group_type = 0; 3510 int prot_group_type = 0;
3155 int fcpdl; 3511 int fcpdl;
3156 3512
3157 /* 3513 /*
3158 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3514 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3159 * fcp_rsp regions to the first data bde entry 3515 * fcp_rsp regions to the first data sge entry
3160 */ 3516 */
3161 if (scsi_sg_count(scsi_cmnd)) { 3517 if (scsi_sg_count(scsi_cmnd)) {
3162 /* 3518 /*
@@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3179 3535
3180 sgl += 1; 3536 sgl += 1;
3181 lpfc_cmd->seg_cnt = datasegcnt; 3537 lpfc_cmd->seg_cnt = datasegcnt;
3182 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3538
3183 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3539 /* First check if data segment count from SCSI Layer is good */
3184 "9087 BLKGRD: %s: Too many sg segments" 3540 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3185 " from dma_map_sg. Config %d, seg_cnt" 3541 goto err;
3186 " %d\n",
3187 __func__, phba->cfg_sg_seg_cnt,
3188 lpfc_cmd->seg_cnt);
3189 scsi_dma_unmap(scsi_cmnd);
3190 return 1;
3191 }
3192 3542
3193 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3543 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3194 3544
3195 switch (prot_group_type) { 3545 switch (prot_group_type) {
3196 case LPFC_PG_TYPE_NO_DIF: 3546 case LPFC_PG_TYPE_NO_DIF:
3197 num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3547 /* Here we need to add a DISEED to the count */
3548 if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3549 goto err;
3550
3551 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3198 datasegcnt); 3552 datasegcnt);
3553
3199 /* we should have 2 or more entries in buffer list */ 3554 /* we should have 2 or more entries in buffer list */
3200 if (num_bde < 2) 3555 if (num_sge < 2)
3201 goto err; 3556 goto err;
3202 break; 3557 break;
3203 case LPFC_PG_TYPE_DIF_BUF:{ 3558
3559 case LPFC_PG_TYPE_DIF_BUF:
3204 /* 3560 /*
3205 * This type indicates that protection buffers are 3561 * This type indicates that protection buffers are
3206 * passed to the driver, so that needs to be prepared 3562 * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3215 } 3571 }
3216 3572
3217 lpfc_cmd->prot_seg_cnt = protsegcnt; 3573 lpfc_cmd->prot_seg_cnt = protsegcnt;
3218 if (lpfc_cmd->prot_seg_cnt 3574 /*
3219 > phba->cfg_prot_sg_seg_cnt) { 3575 * There is a minimun of 3 SGEs used for every
3220 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3576 * protection data segment.
3221 "9088 BLKGRD: %s: Too many prot sg " 3577 */
3222 "segments from dma_map_sg. Config %d," 3578 if ((lpfc_cmd->prot_seg_cnt * 3) >
3223 "prot_seg_cnt %d\n", __func__, 3579 (phba->cfg_total_seg_cnt - 2))
3224 phba->cfg_prot_sg_seg_cnt, 3580 goto err;
3225 lpfc_cmd->prot_seg_cnt);
3226 dma_unmap_sg(&phba->pcidev->dev,
3227 scsi_prot_sglist(scsi_cmnd),
3228 scsi_prot_sg_count(scsi_cmnd),
3229 datadir);
3230 scsi_dma_unmap(scsi_cmnd);
3231 return 1;
3232 }
3233 3581
3234 num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3582 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3235 datasegcnt, protsegcnt); 3583 datasegcnt, protsegcnt);
3584
3236 /* we should have 3 or more entries in buffer list */ 3585 /* we should have 3 or more entries in buffer list */
3237 if (num_bde < 3) 3586 if ((num_sge < 3) ||
3587 (num_sge > phba->cfg_total_seg_cnt))
3238 goto err; 3588 goto err;
3239 break; 3589 break;
3240 } 3590
3241 case LPFC_PG_TYPE_INVALID: 3591 case LPFC_PG_TYPE_INVALID:
3242 default: 3592 default:
3593 scsi_dma_unmap(scsi_cmnd);
3594 lpfc_cmd->seg_cnt = 0;
3595
3243 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3596 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3244 "9083 Unexpected protection group %i\n", 3597 "9083 Unexpected protection group %i\n",
3245 prot_group_type); 3598 prot_group_type);
@@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3263 } 3616 }
3264 3617
3265 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3618 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3266
3267 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3619 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3268 3620
3269 /* 3621 /*
@@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3274 3626
3275 return 0; 3627 return 0;
3276err: 3628err:
3629 if (lpfc_cmd->seg_cnt)
3630 scsi_dma_unmap(scsi_cmnd);
3631 if (lpfc_cmd->prot_seg_cnt)
3632 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3633 scsi_prot_sg_count(scsi_cmnd),
3634 scsi_cmnd->sc_data_direction);
3635
3277 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3636 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3278 "9084 Could not setup all needed BDE's" 3637 "9084 Cannot setup S/G List for HBA"
3279 "prot_group_type=%d, num_bde=%d\n", 3638 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3280 prot_group_type, num_bde); 3639 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3640 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3641 prot_group_type, num_sge);
3642
3643 lpfc_cmd->seg_cnt = 0;
3644 lpfc_cmd->prot_seg_cnt = 0;
3281 return 1; 3645 return 1;
3282} 3646}
3283 3647
@@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4357 4721
4358 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 4722 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4359 if (vport->phba->cfg_enable_bg) { 4723 if (vport->phba->cfg_enable_bg) {
4360 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, 4724 lpfc_printf_vlog(vport,
4725 KERN_INFO, LOG_SCSI_CMD,
4361 "9033 BLKGRD: rcvd %s cmd:x%x " 4726 "9033 BLKGRD: rcvd %s cmd:x%x "
4362 "sector x%llx cnt %u pt %x\n", 4727 "sector x%llx cnt %u pt %x\n",
4363 dif_op_str[scsi_get_prot_op(cmnd)], 4728 dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4369 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 4734 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4370 } else { 4735 } else {
4371 if (vport->phba->cfg_enable_bg) { 4736 if (vport->phba->cfg_enable_bg) {
4372 lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, 4737 lpfc_printf_vlog(vport,
4738 KERN_INFO, LOG_SCSI_CMD,
4373 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 4739 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4374 "x%x sector x%llx cnt %u pt %x\n", 4740 "x%x sector x%llx cnt %u pt %x\n",
4375 cmnd->cmnd[0], 4741 cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4542 /* Wait for abort to complete */ 4908 /* Wait for abort to complete */
4543 wait_event_timeout(waitq, 4909 wait_event_timeout(waitq,
4544 (lpfc_cmd->pCmd != cmnd), 4910 (lpfc_cmd->pCmd != cmnd),
4545 (2*vport->cfg_devloss_tmo*HZ)); 4911 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4546 lpfc_cmd->waitq = NULL; 4912 lpfc_cmd->waitq = NULL;
4547 4913
4548 if (lpfc_cmd->pCmd == cmnd) { 4914 if (lpfc_cmd->pCmd == cmnd) {
@@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5012 struct lpfc_hba *phba = vport->phba; 5378 struct lpfc_hba *phba = vport->phba;
5013 int rc, ret = SUCCESS; 5379 int rc, ret = SUCCESS;
5014 5380
5381 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5382 "3172 SCSI layer issued Host Reset Data:\n");
5383
5015 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 5384 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5016 lpfc_offline(phba); 5385 lpfc_offline(phba);
5017 rc = lpfc_sli_brdrestart(phba); 5386 rc = lpfc_sli_brdrestart(phba);
5018 if (rc) 5387 if (rc)
5019 ret = FAILED; 5388 ret = FAILED;
5020 lpfc_online(phba); 5389 rc = lpfc_online(phba);
5390 if (rc)
5391 ret = FAILED;
5021 lpfc_unblock_mgmt_io(phba); 5392 lpfc_unblock_mgmt_io(phba);
5022 5393
5023 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 5394 if (ret == FAILED) {
5024 "3172 SCSI layer issued Host Reset Data: x%x\n", ret); 5395 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5396 "3323 Failed host reset, bring it offline\n");
5397 lpfc_sli4_offline_eratt(phba);
5398 }
5025 return ret; 5399 return ret;
5026} 5400}
5027 5401
@@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev)
5088 } 5462 }
5089 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); 5463 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5090 if (num_to_alloc != num_allocated) { 5464 if (num_to_alloc != num_allocated) {
5091 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5465 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5092 "0708 Allocation request of %d " 5466 "0708 Allocation request of %d "
5093 "command buffers did not succeed. " 5467 "command buffers did not succeed. "
5094 "Allocated %d buffers.\n", 5468 "Allocated %d buffers.\n",
5095 num_to_alloc, num_allocated); 5469 num_to_alloc, num_allocated);
5096 } 5470 }
5097 if (num_allocated > 0) 5471 if (num_allocated > 0)
5098 phba->total_scsi_bufs += num_allocated; 5472 phba->total_scsi_bufs += num_allocated;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35dd17eb0f27..572579f87de4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
667 667
668 spin_lock_irqsave(&phba->hbalock, iflags); 668 spin_lock_irqsave(&phba->hbalock, iflags);
669 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 669 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
670 next_time = jiffies + HZ * (phba->fc_ratov + 1); 670 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
671 list_for_each_entry_safe(rrq, nextrrq, 671 list_for_each_entry_safe(rrq, nextrrq,
672 &phba->active_rrq_list, list) { 672 &phba->active_rrq_list, list) {
673 if (time_after(jiffies, rrq->rrq_stop_time)) 673 if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
782 return; 782 return;
783 spin_lock_irqsave(&phba->hbalock, iflags); 783 spin_lock_irqsave(&phba->hbalock, iflags);
784 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 784 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
785 next_time = jiffies + HZ * (phba->fc_ratov * 2); 785 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
786 list_splice_init(&phba->active_rrq_list, &rrq_list); 786 list_splice_init(&phba->active_rrq_list, &rrq_list);
787 spin_unlock_irqrestore(&phba->hbalock, iflags); 787 spin_unlock_irqrestore(&phba->hbalock, iflags);
788 788
@@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
878 else 878 else
879 rrq->send_rrq = 0; 879 rrq->send_rrq = 0;
880 rrq->xritag = xritag; 880 rrq->xritag = xritag;
881 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 881 rrq->rrq_stop_time = jiffies +
882 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
882 rrq->ndlp = ndlp; 883 rrq->ndlp = ndlp;
883 rrq->nlp_DID = ndlp->nlp_DID; 884 rrq->nlp_DID = ndlp->nlp_DID;
884 rrq->vport = ndlp->vport; 885 rrq->vport = ndlp->vport;
@@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
926 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 927 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
927 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 928 !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
928 ndlp = piocbq->context_un.ndlp; 929 ndlp = piocbq->context_un.ndlp;
929 else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && 930 else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
930 (piocbq->iocb_flag & LPFC_IO_LIBDFC))
931 ndlp = piocbq->context_un.ndlp; 931 ndlp = piocbq->context_un.ndlp;
932 else 932 else
933 ndlp = piocbq->context1; 933 ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 BUG(); 1339 BUG();
1340 else 1340 else
1341 mod_timer(&piocb->vport->els_tmofunc, 1341 mod_timer(&piocb->vport->els_tmofunc,
1342 jiffies + HZ * (phba->fc_ratov << 1)); 1342 jiffies +
1343 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1343 } 1344 }
1344 1345
1345 1346
@@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2340 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2341 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2341 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2342 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2342 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2343 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2343 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2344 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2345 "x%x x%x x%x\n",
2344 pmb->vport ? pmb->vport->vpi : 0, 2346 pmb->vport ? pmb->vport->vpi : 0,
2345 pmbox->mbxCommand, 2347 pmbox->mbxCommand,
2346 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2348 lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2354 pmbox->un.varWords[4], 2356 pmbox->un.varWords[4],
2355 pmbox->un.varWords[5], 2357 pmbox->un.varWords[5],
2356 pmbox->un.varWords[6], 2358 pmbox->un.varWords[6],
2357 pmbox->un.varWords[7]); 2359 pmbox->un.varWords[7],
2360 pmbox->un.varWords[8],
2361 pmbox->un.varWords[9],
2362 pmbox->un.varWords[10]);
2358 2363
2359 if (pmb->mbox_cmpl) 2364 if (pmb->mbox_cmpl)
2360 pmb->mbox_cmpl(phba,pmb); 2365 pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
2908 lpfc_worker_wake_up(phba); 2913 lpfc_worker_wake_up(phba);
2909 else 2914 else
2910 /* Restart the timer for next eratt poll */ 2915 /* Restart the timer for next eratt poll */
2911 mod_timer(&phba->eratt_poll, jiffies + 2916 mod_timer(&phba->eratt_poll,
2912 HZ * LPFC_ERATT_POLL_INTERVAL); 2917 jiffies +
2918 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
2913 return; 2919 return;
2914} 2920}
2915 2921
@@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5511 list_del_init(&rsrc_blk->list); 5517 list_del_init(&rsrc_blk->list);
5512 kfree(rsrc_blk); 5518 kfree(rsrc_blk);
5513 } 5519 }
5520 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5514 break; 5521 break;
5515 case LPFC_RSC_TYPE_FCOE_XRI: 5522 case LPFC_RSC_TYPE_FCOE_XRI:
5516 kfree(phba->sli4_hba.xri_bmask); 5523 kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5811 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5818 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5812 } else { 5819 } else {
5813 kfree(phba->vpi_bmask); 5820 kfree(phba->vpi_bmask);
5821 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5814 kfree(phba->vpi_ids); 5822 kfree(phba->vpi_ids);
5815 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5823 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5816 kfree(phba->sli4_hba.xri_bmask); 5824 kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5992 struct lpfc_sglq *sglq_entry = NULL; 6000 struct lpfc_sglq *sglq_entry = NULL;
5993 struct lpfc_sglq *sglq_entry_next = NULL; 6001 struct lpfc_sglq *sglq_entry_next = NULL;
5994 struct lpfc_sglq *sglq_entry_first = NULL; 6002 struct lpfc_sglq *sglq_entry_first = NULL;
5995 int status, post_cnt = 0, num_posted = 0, block_cnt = 0; 6003 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
5996 int last_xritag = NO_XRI; 6004 int last_xritag = NO_XRI;
5997 LIST_HEAD(prep_sgl_list); 6005 LIST_HEAD(prep_sgl_list);
5998 LIST_HEAD(blck_sgl_list); 6006 LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6004 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6012 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6005 spin_unlock_irq(&phba->hbalock); 6013 spin_unlock_irq(&phba->hbalock);
6006 6014
6015 total_cnt = phba->sli4_hba.els_xri_cnt;
6007 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6016 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6008 &allc_sgl_list, list) { 6017 &allc_sgl_list, list) {
6009 list_del_init(&sglq_entry->list); 6018 list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6055 sglq_entry->sli4_xritag); 6064 sglq_entry->sli4_xritag);
6056 list_add_tail(&sglq_entry->list, 6065 list_add_tail(&sglq_entry->list,
6057 &free_sgl_list); 6066 &free_sgl_list);
6058 spin_lock_irq(&phba->hbalock); 6067 total_cnt--;
6059 phba->sli4_hba.els_xri_cnt--;
6060 spin_unlock_irq(&phba->hbalock);
6061 } 6068 }
6062 } 6069 }
6063 } 6070 }
@@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6085 (sglq_entry_first->sli4_xritag + 6092 (sglq_entry_first->sli4_xritag +
6086 post_cnt - 1)); 6093 post_cnt - 1));
6087 list_splice_init(&blck_sgl_list, &free_sgl_list); 6094 list_splice_init(&blck_sgl_list, &free_sgl_list);
6088 spin_lock_irq(&phba->hbalock); 6095 total_cnt -= post_cnt;
6089 phba->sli4_hba.els_xri_cnt -= post_cnt;
6090 spin_unlock_irq(&phba->hbalock);
6091 } 6096 }
6092 6097
6093 /* don't reset xirtag due to hole in xri block */ 6098 /* don't reset xirtag due to hole in xri block */
@@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6097 /* reset els sgl post count for next round of posting */ 6102 /* reset els sgl post count for next round of posting */
6098 post_cnt = 0; 6103 post_cnt = 0;
6099 } 6104 }
6105 /* update the number of XRIs posted for ELS */
6106 phba->sli4_hba.els_xri_cnt = total_cnt;
6100 6107
6101 /* free the els sgls failed to post */ 6108 /* free the els sgls failed to post */
6102 lpfc_free_sgl_list(phba, &free_sgl_list); 6109 lpfc_free_sgl_list(phba, &free_sgl_list);
@@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6446 6453
6447 /* Start the ELS watchdog timer */ 6454 /* Start the ELS watchdog timer */
6448 mod_timer(&vport->els_tmofunc, 6455 mod_timer(&vport->els_tmofunc,
6449 jiffies + HZ * (phba->fc_ratov * 2)); 6456 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6450 6457
6451 /* Start heart beat timer */ 6458 /* Start heart beat timer */
6452 mod_timer(&phba->hb_tmofunc, 6459 mod_timer(&phba->hb_tmofunc,
6453 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 6460 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6454 phba->hb_outstanding = 0; 6461 phba->hb_outstanding = 0;
6455 phba->last_completion_time = jiffies; 6462 phba->last_completion_time = jiffies;
6456 6463
6457 /* Start error attention (ERATT) polling timer */ 6464 /* Start error attention (ERATT) polling timer */
6458 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 6465 mod_timer(&phba->eratt_poll,
6466 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
6459 6467
6460 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6468 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6461 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6469 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6822 goto out_not_finished; 6830 goto out_not_finished;
6823 } 6831 }
6824 /* timeout active mbox command */ 6832 /* timeout active mbox command */
6825 mod_timer(&psli->mbox_tmo, (jiffies + 6833 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6826 (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); 6834 1000);
6835 mod_timer(&psli->mbox_tmo, jiffies + timeout);
6827 } 6836 }
6828 6837
6829 /* Mailbox cmd <cmd> issue */ 6838 /* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7496 7505
7497 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7506 /* Start timer for the mbox_tmo and log some mailbox post messages */
7498 mod_timer(&psli->mbox_tmo, (jiffies + 7507 mod_timer(&psli->mbox_tmo, (jiffies +
7499 (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); 7508 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7500 7509
7501 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7510 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7502 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7511 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7914static inline uint32_t 7923static inline uint32_t
7915lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7924lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7916{ 7925{
7917 int i; 7926 struct lpfc_vector_map_info *cpup;
7918 7927 int chann, cpu;
7919 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7920 i = smp_processor_id();
7921 else
7922 i = atomic_add_return(1, &phba->fcp_qidx);
7923 7928
7924 i = (i % phba->cfg_fcp_io_channel); 7929 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
7925 return i; 7930 cpu = smp_processor_id();
7931 if (cpu < phba->sli4_hba.num_present_cpu) {
7932 cpup = phba->sli4_hba.cpu_map;
7933 cpup += cpu;
7934 return cpup->channel_id;
7935 }
7936 chann = cpu;
7937 }
7938 chann = atomic_add_return(1, &phba->fcp_qidx);
7939 chann = (chann % phba->cfg_fcp_io_channel);
7940 return chann;
7926} 7941}
7927 7942
7928/** 7943/**
@@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8444 8459
8445 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8460 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8446 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8461 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8462 if (unlikely(!phba->sli4_hba.fcp_wq))
8463 return IOCB_ERROR;
8447 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8464 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8448 &wqe)) 8465 &wqe))
8449 return IOCB_ERROR; 8466 return IOCB_ERROR;
8450 } else { 8467 } else {
8468 if (unlikely(!phba->sli4_hba.els_wq))
8469 return IOCB_ERROR;
8451 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8470 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8452 return IOCB_ERROR; 8471 return IOCB_ERROR;
8453 } 8472 }
@@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10003 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 10022 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10004 SLI_IOCB_RET_IOCB); 10023 SLI_IOCB_RET_IOCB);
10005 if (retval == IOCB_SUCCESS) { 10024 if (retval == IOCB_SUCCESS) {
10006 timeout_req = timeout * HZ; 10025 timeout_req = msecs_to_jiffies(timeout * 1000);
10007 timeleft = wait_event_timeout(done_q, 10026 timeleft = wait_event_timeout(done_q,
10008 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 10027 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10009 timeout_req); 10028 timeout_req);
@@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10108 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 10127 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10109 wait_event_interruptible_timeout(done_q, 10128 wait_event_interruptible_timeout(done_q,
10110 pmboxq->mbox_flag & LPFC_MBX_WAKE, 10129 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10111 timeout * HZ); 10130 msecs_to_jiffies(timeout * 1000));
10112 10131
10113 spin_lock_irqsave(&phba->hbalock, flag); 10132 spin_lock_irqsave(&phba->hbalock, flag);
10114 pmboxq->context1 = NULL; 10133 pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12899 } 12918 }
12900 wq->db_regaddr = bar_memmap_p + db_offset; 12919 wq->db_regaddr = bar_memmap_p + db_offset;
12901 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12920 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12902 "3264 WQ[%d]: barset:x%x, offset:x%x\n", 12921 "3264 WQ[%d]: barset:x%x, offset:x%x, "
12903 wq->queue_id, pci_barset, db_offset); 12922 "format:x%x\n", wq->queue_id, pci_barset,
12923 db_offset, wq->db_format);
12904 } else { 12924 } else {
12905 wq->db_format = LPFC_DB_LIST_FORMAT; 12925 wq->db_format = LPFC_DB_LIST_FORMAT;
12906 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 12926 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13120 } 13140 }
13121 hrq->db_regaddr = bar_memmap_p + db_offset; 13141 hrq->db_regaddr = bar_memmap_p + db_offset;
13122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13123 "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", 13143 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
13124 hrq->queue_id, pci_barset, db_offset); 13144 "format:x%x\n", hrq->queue_id, pci_barset,
13145 db_offset, hrq->db_format);
13125 } else { 13146 } else {
13126 hrq->db_format = LPFC_DB_RING_FORMAT; 13147 hrq->db_format = LPFC_DB_RING_FORMAT;
13127 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 13148 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13971 } 13992 }
13972 13993
13973 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13994 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13974 "2538 Received frame rctl:%s type:%s " 13995 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
13975 "Frame Data:%08x %08x %08x %08x %08x %08x\n", 13996 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
13976 rctl_names[fc_hdr->fh_r_ctl], 13997 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
13977 type_names[fc_hdr->fh_type], 13998 type_names[fc_hdr->fh_type], fc_hdr->fh_type,
13978 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 13999 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13979 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 14000 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13980 be32_to_cpu(header[4]), be32_to_cpu(header[5])); 14001 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14002 be32_to_cpu(header[6]));
13981 return 0; 14003 return 0;
13982drop: 14004drop:
13983 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14005 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index be02b59ea279..67af460184ba 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -346,11 +346,6 @@ struct lpfc_bmbx {
346#define SLI4_CT_VFI 2 346#define SLI4_CT_VFI 2
347#define SLI4_CT_FCFI 3 347#define SLI4_CT_FCFI 3
348 348
349#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
350#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000
351#define LPFC_SLI4_MIN_BUF_SIZE 0x400
352#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
353
354/* 349/*
355 * SLI4 specific data structures 350 * SLI4 specific data structures
356 */ 351 */
@@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
440 435
441#define LPFC_SLI4_HANDLER_NAME_SZ 16 436#define LPFC_SLI4_HANDLER_NAME_SZ 16
442 437
438/* Used for IRQ vector to CPU mapping */
439struct lpfc_vector_map_info {
440 uint16_t phys_id;
441 uint16_t core_id;
442 uint16_t irq;
443 uint16_t channel_id;
444 struct cpumask maskbits;
445};
446#define LPFC_VECTOR_MAP_EMPTY 0xffff
447#define LPFC_MAX_CPU 256
448
443/* SLI4 HBA data structure entries */ 449/* SLI4 HBA data structure entries */
444struct lpfc_sli4_hba { 450struct lpfc_sli4_hba {
445 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 451 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
573 struct lpfc_iov iov; 579 struct lpfc_iov iov;
574 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 580 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
575 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 581 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
582
583 /* CPU to vector mapping information */
584 struct lpfc_vector_map_info *cpu_map;
585 uint16_t num_online_cpu;
586 uint16_t num_present_cpu;
576}; 587};
577 588
578enum lpfc_sge_type { 589enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 664cd04f7cd8..a38dc3b16969 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.38" 21#define LPFC_DRIVER_VERSION "8.3.39"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 0fe188e66000..e28e431564b0 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
80 } 80 }
81} 81}
82 82
83static int 83int
84lpfc_alloc_vpi(struct lpfc_hba *phba) 84lpfc_alloc_vpi(struct lpfc_hba *phba)
85{ 85{
86 unsigned long vpi; 86 unsigned long vpi;
@@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
568 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; 568 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
569 struct lpfc_hba *phba = vport->phba; 569 struct lpfc_hba *phba = vport->phba;
570 long timeout; 570 long timeout;
571 bool ns_ndlp_referenced = false;
571 572
572 if (vport->port_type == LPFC_PHYSICAL_PORT) { 573 if (vport->port_type == LPFC_PHYSICAL_PORT) {
573 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 574 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
628 629
629 lpfc_debugfs_terminate(vport); 630 lpfc_debugfs_terminate(vport);
630 631
632 /*
633 * The call to fc_remove_host might release the NameServer ndlp. Since
634 * we might need to use the ndlp to send the DA_ID CT command,
635 * increment the reference for the NameServer ndlp to prevent it from
636 * being released.
637 */
638 ndlp = lpfc_findnode_did(vport, NameServer_DID);
639 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
640 lpfc_nlp_get(ndlp);
641 ns_ndlp_referenced = true;
642 }
643
631 /* Remove FC host and then SCSI host with the vport */ 644 /* Remove FC host and then SCSI host with the vport */
632 fc_remove_host(lpfc_shost_from_vport(vport)); 645 fc_remove_host(lpfc_shost_from_vport(vport));
633 scsi_remove_host(lpfc_shost_from_vport(vport)); 646 scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
734 lpfc_discovery_wait(vport); 747 lpfc_discovery_wait(vport);
735 748
736skip_logo: 749skip_logo:
750
751 /*
752 * If the NameServer ndlp has been incremented to allow the DA_ID CT
753 * command to be sent, decrement the ndlp now.
754 */
755 if (ns_ndlp_referenced) {
756 ndlp = lpfc_findnode_did(vport, NameServer_DID);
757 lpfc_nlp_put(ndlp);
758 }
759
737 lpfc_cleanup(vport); 760 lpfc_cleanup(vport);
738 lpfc_sli_host_down(vport); 761 lpfc_sli_host_down(vport);
739 762
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 90828340acea..6b2c94eb8134 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); 90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); 91struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
92void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); 92void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
93int lpfc_alloc_vpi(struct lpfc_hba *phba);
93 94
94/* 95/*
95 * queuecommand VPORT-specific return codes. Specified in the host byte code. 96 * queuecommand VPORT-specific return codes. Specified in the host byte code.
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7c90d57b867e..3a9ddae86f1f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
4931 printk(KERN_ERR "megaraid_sas: timed out while" 4931 printk(KERN_ERR "megaraid_sas: timed out while"
4932 "waiting for HBA to recover\n"); 4932 "waiting for HBA to recover\n");
4933 error = -ENODEV; 4933 error = -ENODEV;
4934 goto out_kfree_ioc; 4934 goto out_up;
4935 } 4935 }
4936 spin_unlock_irqrestore(&instance->hba_lock, flags); 4936 spin_unlock_irqrestore(&instance->hba_lock, flags);
4937 4937
4938 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 4938 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
4939 out_up:
4939 up(&instance->ioctl_sem); 4940 up(&instance->ioctl_sem);
4940 4941
4941 out_kfree_ioc: 4942 out_kfree_ioc:
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 74550922ad55..7b7381d7671f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
254 } 254 }
255 for (i = 0; i < MVS_MAX_DEVICES; i++) { 255 for (i = 0; i < MVS_MAX_DEVICES; i++) {
256 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; 256 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
257 mvi->devices[i].dev_type = NO_DEVICE; 257 mvi->devices[i].dev_type = SAS_PHY_UNUSED;
258 mvi->devices[i].device_id = i; 258 mvi->devices[i].device_id = i;
259 mvi->devices[i].dev_status = MVS_DEV_NORMAL; 259 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
260 init_timer(&mvi->devices[i].timer); 260 init_timer(&mvi->devices[i].timer);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 532110f4562a..c9e244984e30 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
706 return 0; 706 return 0;
707} 707}
708 708
709#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) 709#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
710static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, 710static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
711 struct mvs_tmf_task *tmf, int *pass) 711 struct mvs_tmf_task *tmf, int *pass)
712{ 712{
@@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
726 * libsas will use dev->port, should 726 * libsas will use dev->port, should
727 * not call task_done for sata 727 * not call task_done for sata
728 */ 728 */
729 if (dev->dev_type != SATA_DEV) 729 if (dev->dev_type != SAS_SATA_DEV)
730 task->task_done(task); 730 task->task_done(task);
731 return rc; 731 return rc;
732 } 732 }
@@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1159 phy->identify.device_type = 1159 phy->identify.device_type =
1160 phy->att_dev_info & PORT_DEV_TYPE_MASK; 1160 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1161 1161
1162 if (phy->identify.device_type == SAS_END_DEV) 1162 if (phy->identify.device_type == SAS_END_DEVICE)
1163 phy->identify.target_port_protocols = 1163 phy->identify.target_port_protocols =
1164 SAS_PROTOCOL_SSP; 1164 SAS_PROTOCOL_SSP;
1165 else if (phy->identify.device_type != NO_DEVICE) 1165 else if (phy->identify.device_type != SAS_PHY_UNUSED)
1166 phy->identify.target_port_protocols = 1166 phy->identify.target_port_protocols =
1167 SAS_PROTOCOL_SMP; 1167 SAS_PROTOCOL_SMP;
1168 if (oob_done) 1168 if (oob_done)
@@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1260{ 1260{
1261 u32 dev; 1261 u32 dev;
1262 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { 1262 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1263 if (mvi->devices[dev].dev_type == NO_DEVICE) { 1263 if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
1264 mvi->devices[dev].device_id = dev; 1264 mvi->devices[dev].device_id = dev;
1265 return &mvi->devices[dev]; 1265 return &mvi->devices[dev];
1266 } 1266 }
@@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
1278 u32 id = mvi_dev->device_id; 1278 u32 id = mvi_dev->device_id;
1279 memset(mvi_dev, 0, sizeof(*mvi_dev)); 1279 memset(mvi_dev, 0, sizeof(*mvi_dev));
1280 mvi_dev->device_id = id; 1280 mvi_dev->device_id = id;
1281 mvi_dev->dev_type = NO_DEVICE; 1281 mvi_dev->dev_type = SAS_PHY_UNUSED;
1282 mvi_dev->dev_status = MVS_DEV_NORMAL; 1282 mvi_dev->dev_status = MVS_DEV_NORMAL;
1283 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; 1283 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1284} 1284}
@@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1480{ 1480{
1481 int rc; 1481 int rc;
1482 struct sas_phy *phy = sas_get_local_phy(dev); 1482 struct sas_phy *phy = sas_get_local_phy(dev);
1483 int reset_type = (dev->dev_type == SATA_DEV || 1483 int reset_type = (dev->dev_type == SAS_SATA_DEV ||
1484 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 1484 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1485 rc = sas_phy_reset(phy, reset_type); 1485 rc = sas_phy_reset(phy, reset_type);
1486 sas_put_local_phy(phy); 1486 sas_put_local_phy(phy);
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
1629 1629
1630 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1630 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1631 task->task_proto & SAS_PROTOCOL_STP) { 1631 task->task_proto & SAS_PROTOCOL_STP) {
1632 if (SATA_DEV == dev->dev_type) { 1632 if (SAS_SATA_DEV == dev->dev_type) {
1633 struct mvs_slot_info *slot = task->lldd_task; 1633 struct mvs_slot_info *slot = task->lldd_task;
1634 u32 slot_idx = (u32)(slot - mvi->slot_info); 1634 u32 slot_idx = (u32)(slot - mvi->slot_info);
1635 mv_dprintk("mvs_abort_task() mvi=%p task=%p " 1635 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 9f3cc13a5ce7..60e2fb7f2dca 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch;
67extern struct kmem_cache *mvs_task_list_cache; 67extern struct kmem_cache *mvs_task_list_cache;
68 68
69#define DEV_IS_EXPANDER(type) \ 69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV)) 70 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
71 71
72#define bit(n) ((u64)1 << n) 72#define bit(n) ((u64)1 << n)
73 73
@@ -241,7 +241,7 @@ struct mvs_phy {
241 241
242struct mvs_device { 242struct mvs_device {
243 struct list_head dev_entry; 243 struct list_head dev_entry;
244 enum sas_dev_type dev_type; 244 enum sas_device_type dev_type;
245 struct mvs_info *mvi_info; 245 struct mvs_info *mvi_info;
246 struct domain_device *sas_device; 246 struct domain_device *sas_device;
247 struct timer_list timer; 247 struct timer_list timer;
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index 52f04296171c..ce4cd87c7c66 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -4,9 +4,10 @@
4# Copyright (C) 2008-2009 USI Co., Ltd. 4# Copyright (C) 2008-2009 USI Co., Ltd.
5 5
6 6
7obj-$(CONFIG_SCSI_PM8001) += pm8001.o 7obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
8pm8001-y += pm8001_init.o \ 8pm80xx-y += pm8001_init.o \
9 pm8001_sas.o \ 9 pm8001_sas.o \
10 pm8001_ctl.o \ 10 pm8001_ctl.o \
11 pm8001_hwi.o 11 pm8001_hwi.o \
12 pm80xx_hwi.o
12 13
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 45bc197bc22f..d99f41c2ca13 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
58 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 58 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
59 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 59 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
60 60
61 return snprintf(buf, PAGE_SIZE, "%d\n", 61 if (pm8001_ha->chip_id == chip_8001) {
62 pm8001_ha->main_cfg_tbl.interface_rev); 62 return snprintf(buf, PAGE_SIZE, "%d\n",
63 pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
64 } else {
65 return snprintf(buf, PAGE_SIZE, "%d\n",
66 pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
67 }
63} 68}
64static 69static
65DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); 70DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
78 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 83 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
79 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 84 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
80 85
81 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", 86 if (pm8001_ha->chip_id == chip_8001) {
82 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), 87 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
83 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), 88 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
84 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), 89 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
85 (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); 90 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
91 (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
92 } else {
93 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
94 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
95 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
96 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
97 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
98 }
86} 99}
87static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); 100static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
88/** 101/**
@@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
99 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 112 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
100 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 113 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
101 114
102 return snprintf(buf, PAGE_SIZE, "%d\n", 115 if (pm8001_ha->chip_id == chip_8001) {
103 pm8001_ha->main_cfg_tbl.max_out_io); 116 return snprintf(buf, PAGE_SIZE, "%d\n",
117 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
118 } else {
119 return snprintf(buf, PAGE_SIZE, "%d\n",
120 pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
121 }
104} 122}
105static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); 123static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
106/** 124/**
@@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
117 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 135 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
118 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 136 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
119 137
120 return snprintf(buf, PAGE_SIZE, "%04d\n", 138 if (pm8001_ha->chip_id == chip_8001) {
121 (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); 139 return snprintf(buf, PAGE_SIZE, "%04d\n",
140 (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
141 );
142 } else {
143 return snprintf(buf, PAGE_SIZE, "%04d\n",
144 (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
145 );
146 }
122} 147}
123static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); 148static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
124/** 149/**
@@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
136 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 161 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
137 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 162 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
138 163
139 return snprintf(buf, PAGE_SIZE, "%04d\n", 164 if (pm8001_ha->chip_id == chip_8001) {
140 pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); 165 return snprintf(buf, PAGE_SIZE, "%04d\n",
166 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
167 );
168 } else {
169 return snprintf(buf, PAGE_SIZE, "%04d\n",
170 pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
171 );
172 }
141} 173}
142static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); 174static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
143 175
@@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
173 struct Scsi_Host *shost = class_to_shost(cdev); 205 struct Scsi_Host *shost = class_to_shost(cdev);
174 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 206 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
175 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 207 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
176 mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; 208 /* fe000000 means supports SAS2.1 */
209 if (pm8001_ha->chip_id == chip_8001)
210 mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
211 0xfe000000)>>25;
212 else
213 /* fe000000 means supports SAS2.1 */
214 mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
215 0xfe000000)>>25;
177 return show_sas_spec_support_status(mode, buf); 216 return show_sas_spec_support_status(mode, buf);
178} 217}
179static DEVICE_ATTR(sas_spec_support, S_IRUGO, 218static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
361 goto out; 400 goto out;
362 } 401 }
363 payload = (struct pm8001_ioctl_payload *)ioctlbuffer; 402 payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
364 memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, 403 memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
365 pm8001_ha->fw_image->size); 404 pm8001_ha->fw_image->size);
366 payload->length = pm8001_ha->fw_image->size; 405 payload->length = pm8001_ha->fw_image->size;
367 payload->id = 0; 406 payload->id = 0;
407 payload->minor_function = 0x1;
368 pm8001_ha->nvmd_completion = &completion; 408 pm8001_ha->nvmd_completion = &completion;
369 ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); 409 ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
370 wait_for_completion(&completion); 410 wait_for_completion(&completion);
@@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
411 payload->length = 1024*16; 451 payload->length = 1024*16;
412 payload->id = 0; 452 payload->id = 0;
413 fwControl = 453 fwControl =
414 (struct fw_control_info *)payload->func_specific; 454 (struct fw_control_info *)&payload->func_specific;
415 fwControl->len = IOCTL_BUF_SIZE; /* IN */ 455 fwControl->len = IOCTL_BUF_SIZE; /* IN */
416 fwControl->size = partitionSize + HEADER_LEN;/* IN */ 456 fwControl->size = partitionSize + HEADER_LEN;/* IN */
417 fwControl->retcode = 0;/* OUT */ 457 fwControl->retcode = 0;/* OUT */
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index c3d20c8d4abe..479c5a7a863a 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -43,9 +43,12 @@
43 43
44enum chip_flavors { 44enum chip_flavors {
45 chip_8001, 45 chip_8001,
46 chip_8008,
47 chip_8009,
48 chip_8018,
49 chip_8019
46}; 50};
47#define USI_MAX_MEMCNT 9 51
48#define PM8001_MAX_DMA_SG SG_ALL
49enum phy_speed { 52enum phy_speed {
50 PHY_SPEED_15 = 0x01, 53 PHY_SPEED_15 = 0x01,
51 PHY_SPEED_30 = 0x02, 54 PHY_SPEED_30 = 0x02,
@@ -69,23 +72,34 @@ enum port_type {
69#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ 72#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
70#define PM8001_MAX_INB_NUM 1 73#define PM8001_MAX_INB_NUM 1
71#define PM8001_MAX_OUTB_NUM 1 74#define PM8001_MAX_OUTB_NUM 1
75#define PM8001_MAX_SPCV_INB_NUM 1
76#define PM8001_MAX_SPCV_OUTB_NUM 4
72#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ 77#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
73 78
79/* Inbound/Outbound queue size */
80#define IOMB_SIZE_SPC 64
81#define IOMB_SIZE_SPCV 128
82
74/* unchangeable hardware details */ 83/* unchangeable hardware details */
75#define PM8001_MAX_PHYS 8 /* max. possible phys */ 84#define PM8001_MAX_PHYS 16 /* max. possible phys */
76#define PM8001_MAX_PORTS 8 /* max. possible ports */ 85#define PM8001_MAX_PORTS 16 /* max. possible ports */
77#define PM8001_MAX_DEVICES 1024 /* max supported device */ 86#define PM8001_MAX_DEVICES 2048 /* max supported device */
87#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
78 88
89#define USI_MAX_MEMCNT_BASE 5
90#define IB (USI_MAX_MEMCNT_BASE + 1)
91#define CI (IB + PM8001_MAX_SPCV_INB_NUM)
92#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
93#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
94#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
95#define PM8001_MAX_DMA_SG SG_ALL
79enum memory_region_num { 96enum memory_region_num {
80 AAP1 = 0x0, /* application acceleration processor */ 97 AAP1 = 0x0, /* application acceleration processor */
81 IOP, /* IO processor */ 98 IOP, /* IO processor */
82 CI, /* consumer index */
83 PI, /* producer index */
84 IB, /* inbound queue */
85 OB, /* outbound queue */
86 NVMD, /* NVM device */ 99 NVMD, /* NVM device */
87 DEV_MEM, /* memory for devices */ 100 DEV_MEM, /* memory for devices */
88 CCB_MEM, /* memory for command control block */ 101 CCB_MEM, /* memory for command control block */
102 FW_FLASH /* memory for fw flash update */
89}; 103};
90#define PM8001_EVENT_LOG_SIZE (128 * 1024) 104#define PM8001_EVENT_LOG_SIZE (128 * 1024)
91 105
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index b8dd05074abb..69dd49c05f1e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -50,32 +50,39 @@
50static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) 50static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
51{ 51{
52 void __iomem *address = pm8001_ha->main_cfg_tbl_addr; 52 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
53 pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); 53 pm8001_ha->main_cfg_tbl.pm8001_tbl.signature =
54 pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); 54 pm8001_mr32(address, 0x00);
55 pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); 55 pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
56 pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); 56 pm8001_mr32(address, 0x04);
57 pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); 57 pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
58 pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); 58 pm8001_mr32(address, 0x08);
59 pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); 59 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io =
60 pm8001_ha->main_cfg_tbl.inbound_queue_offset = 60 pm8001_mr32(address, 0x0C);
61 pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl =
62 pm8001_mr32(address, 0x10);
63 pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
64 pm8001_mr32(address, 0x14);
65 pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset =
66 pm8001_mr32(address, 0x18);
67 pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
61 pm8001_mr32(address, MAIN_IBQ_OFFSET); 68 pm8001_mr32(address, MAIN_IBQ_OFFSET);
62 pm8001_ha->main_cfg_tbl.outbound_queue_offset = 69 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
63 pm8001_mr32(address, MAIN_OBQ_OFFSET); 70 pm8001_mr32(address, MAIN_OBQ_OFFSET);
64 pm8001_ha->main_cfg_tbl.hda_mode_flag = 71 pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag =
65 pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); 72 pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
66 73
67 /* read analog Setting offset from the configuration table */ 74 /* read analog Setting offset from the configuration table */
68 pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = 75 pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
69 pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); 76 pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
70 77
71 /* read Error Dump Offset and Length */ 78 /* read Error Dump Offset and Length */
72 pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = 79 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
73 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); 80 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
74 pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = 81 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
75 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); 82 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
76 pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = 83 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
77 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); 84 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
78 pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = 85 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
79 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); 86 pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
80} 87}
81 88
@@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
86static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) 93static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
87{ 94{
88 void __iomem *address = pm8001_ha->general_stat_tbl_addr; 95 void __iomem *address = pm8001_ha->general_stat_tbl_addr;
89 pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); 96 pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate =
90 pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); 97 pm8001_mr32(address, 0x00);
91 pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); 98 pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 =
92 pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); 99 pm8001_mr32(address, 0x04);
93 pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); 100 pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 =
94 pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); 101 pm8001_mr32(address, 0x08);
95 pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); 102 pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt =
96 pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); 103 pm8001_mr32(address, 0x0C);
97 pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); 104 pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt =
98 pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); 105 pm8001_mr32(address, 0x10);
99 pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); 106 pm8001_ha->gs_tbl.pm8001_tbl.rsvd =
100 pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); 107 pm8001_mr32(address, 0x14);
101 pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); 108 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] =
102 pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); 109 pm8001_mr32(address, 0x18);
103 pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); 110 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] =
104 pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); 111 pm8001_mr32(address, 0x1C);
105 pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); 112 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] =
106 pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); 113 pm8001_mr32(address, 0x20);
107 pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); 114 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] =
108 pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); 115 pm8001_mr32(address, 0x24);
109 pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); 116 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] =
110 pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); 117 pm8001_mr32(address, 0x28);
111 pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); 118 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] =
112 pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); 119 pm8001_mr32(address, 0x2C);
113 pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); 120 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] =
121 pm8001_mr32(address, 0x30);
122 pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] =
123 pm8001_mr32(address, 0x34);
124 pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val =
125 pm8001_mr32(address, 0x38);
126 pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] =
127 pm8001_mr32(address, 0x3C);
128 pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] =
129 pm8001_mr32(address, 0x40);
130 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] =
131 pm8001_mr32(address, 0x44);
132 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] =
133 pm8001_mr32(address, 0x48);
134 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] =
135 pm8001_mr32(address, 0x4C);
136 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] =
137 pm8001_mr32(address, 0x50);
138 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] =
139 pm8001_mr32(address, 0x54);
140 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] =
141 pm8001_mr32(address, 0x58);
142 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] =
143 pm8001_mr32(address, 0x5C);
144 pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] =
145 pm8001_mr32(address, 0x60);
114} 146}
115 147
116/** 148/**
@@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
119 */ 151 */
120static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) 152static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
121{ 153{
122 int inbQ_num = 1;
123 int i; 154 int i;
124 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; 155 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
125 for (i = 0; i < inbQ_num; i++) { 156 for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
126 u32 offset = i * 0x20; 157 u32 offset = i * 0x20;
127 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = 158 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
128 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); 159 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
137 */ 168 */
138static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) 169static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
139{ 170{
140 int outbQ_num = 1;
141 int i; 171 int i;
142 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; 172 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
143 for (i = 0; i < outbQ_num; i++) { 173 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
144 u32 offset = i * 0x24; 174 u32 offset = i * 0x24;
145 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = 175 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
146 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); 176 get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
155 */ 185 */
156static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) 186static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
157{ 187{
158 int qn = 1;
159 int i; 188 int i;
160 u32 offsetib, offsetob; 189 u32 offsetib, offsetob;
161 void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; 190 void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
162 void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; 191 void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
163 192
164 pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; 193 pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0;
165 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; 194 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0;
166 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; 195 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0;
167 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; 196 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0;
168 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; 197 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0;
169 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; 198 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
170 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; 199 0;
171 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; 200 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
172 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; 201 0;
173 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; 202 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
174 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; 203 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
175 204 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
176 pm8001_ha->main_cfg_tbl.upper_event_log_addr = 205 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
206
207 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr =
177 pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; 208 pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
178 pm8001_ha->main_cfg_tbl.lower_event_log_addr = 209 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr =
179 pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; 210 pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
180 pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; 211 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size =
181 pm8001_ha->main_cfg_tbl.event_log_option = 0x01; 212 PM8001_EVENT_LOG_SIZE;
182 pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = 213 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01;
214 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr =
183 pm8001_ha->memoryMap.region[IOP].phys_addr_hi; 215 pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
184 pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = 216 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr =
185 pm8001_ha->memoryMap.region[IOP].phys_addr_lo; 217 pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
186 pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; 218 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size =
187 pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; 219 PM8001_EVENT_LOG_SIZE;
188 pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; 220 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
189 for (i = 0; i < qn; i++) { 221 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
222 for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
190 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = 223 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
191 PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); 224 PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
192 pm8001_ha->inbnd_q_tbl[i].upper_base_addr = 225 pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
193 pm8001_ha->memoryMap.region[IB].phys_addr_hi; 226 pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
194 pm8001_ha->inbnd_q_tbl[i].lower_base_addr = 227 pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
195 pm8001_ha->memoryMap.region[IB].phys_addr_lo; 228 pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
196 pm8001_ha->inbnd_q_tbl[i].base_virt = 229 pm8001_ha->inbnd_q_tbl[i].base_virt =
197 (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; 230 (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
198 pm8001_ha->inbnd_q_tbl[i].total_length = 231 pm8001_ha->inbnd_q_tbl[i].total_length =
199 pm8001_ha->memoryMap.region[IB].total_len; 232 pm8001_ha->memoryMap.region[IB + i].total_len;
200 pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = 233 pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
201 pm8001_ha->memoryMap.region[CI].phys_addr_hi; 234 pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
202 pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = 235 pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
203 pm8001_ha->memoryMap.region[CI].phys_addr_lo; 236 pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
204 pm8001_ha->inbnd_q_tbl[i].ci_virt = 237 pm8001_ha->inbnd_q_tbl[i].ci_virt =
205 pm8001_ha->memoryMap.region[CI].virt_ptr; 238 pm8001_ha->memoryMap.region[CI + i].virt_ptr;
206 offsetib = i * 0x20; 239 offsetib = i * 0x20;
207 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = 240 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
208 get_pci_bar_index(pm8001_mr32(addressib, 241 get_pci_bar_index(pm8001_mr32(addressib,
@@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
212 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; 245 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
213 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; 246 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
214 } 247 }
215 for (i = 0; i < qn; i++) { 248 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
216 pm8001_ha->outbnd_q_tbl[i].element_size_cnt = 249 pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
217 PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); 250 PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
218 pm8001_ha->outbnd_q_tbl[i].upper_base_addr = 251 pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
219 pm8001_ha->memoryMap.region[OB].phys_addr_hi; 252 pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
220 pm8001_ha->outbnd_q_tbl[i].lower_base_addr = 253 pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
221 pm8001_ha->memoryMap.region[OB].phys_addr_lo; 254 pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
222 pm8001_ha->outbnd_q_tbl[i].base_virt = 255 pm8001_ha->outbnd_q_tbl[i].base_virt =
223 (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; 256 (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
224 pm8001_ha->outbnd_q_tbl[i].total_length = 257 pm8001_ha->outbnd_q_tbl[i].total_length =
225 pm8001_ha->memoryMap.region[OB].total_len; 258 pm8001_ha->memoryMap.region[OB + i].total_len;
226 pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = 259 pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
227 pm8001_ha->memoryMap.region[PI].phys_addr_hi; 260 pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
228 pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = 261 pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
229 pm8001_ha->memoryMap.region[PI].phys_addr_lo; 262 pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
230 pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = 263 pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
231 0 | (10 << 16) | (0 << 24); 264 0 | (10 << 16) | (i << 24);
232 pm8001_ha->outbnd_q_tbl[i].pi_virt = 265 pm8001_ha->outbnd_q_tbl[i].pi_virt =
233 pm8001_ha->memoryMap.region[PI].virt_ptr; 266 pm8001_ha->memoryMap.region[PI + i].virt_ptr;
234 offsetob = i * 0x24; 267 offsetob = i * 0x24;
235 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = 268 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
236 get_pci_bar_index(pm8001_mr32(addressob, 269 get_pci_bar_index(pm8001_mr32(addressob,
@@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
250{ 283{
251 void __iomem *address = pm8001_ha->main_cfg_tbl_addr; 284 void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
252 pm8001_mw32(address, 0x24, 285 pm8001_mw32(address, 0x24,
253 pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); 286 pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
254 pm8001_mw32(address, 0x28, 287 pm8001_mw32(address, 0x28,
255 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); 288 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
256 pm8001_mw32(address, 0x2C, 289 pm8001_mw32(address, 0x2C,
257 pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); 290 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
258 pm8001_mw32(address, 0x30, 291 pm8001_mw32(address, 0x30,
259 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); 292 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
260 pm8001_mw32(address, 0x34, 293 pm8001_mw32(address, 0x34,
261 pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); 294 pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
262 pm8001_mw32(address, 0x38, 295 pm8001_mw32(address, 0x38,
263 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); 296 pm8001_ha->main_cfg_tbl.pm8001_tbl.
297 outbound_tgt_ITNexus_event_pid0_3);
264 pm8001_mw32(address, 0x3C, 298 pm8001_mw32(address, 0x3C,
265 pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); 299 pm8001_ha->main_cfg_tbl.pm8001_tbl.
300 outbound_tgt_ITNexus_event_pid4_7);
266 pm8001_mw32(address, 0x40, 301 pm8001_mw32(address, 0x40,
267 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); 302 pm8001_ha->main_cfg_tbl.pm8001_tbl.
303 outbound_tgt_ssp_event_pid0_3);
268 pm8001_mw32(address, 0x44, 304 pm8001_mw32(address, 0x44,
269 pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); 305 pm8001_ha->main_cfg_tbl.pm8001_tbl.
306 outbound_tgt_ssp_event_pid4_7);
270 pm8001_mw32(address, 0x48, 307 pm8001_mw32(address, 0x48,
271 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); 308 pm8001_ha->main_cfg_tbl.pm8001_tbl.
309 outbound_tgt_smp_event_pid0_3);
272 pm8001_mw32(address, 0x4C, 310 pm8001_mw32(address, 0x4C,
273 pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); 311 pm8001_ha->main_cfg_tbl.pm8001_tbl.
312 outbound_tgt_smp_event_pid4_7);
274 pm8001_mw32(address, 0x50, 313 pm8001_mw32(address, 0x50,
275 pm8001_ha->main_cfg_tbl.upper_event_log_addr); 314 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
276 pm8001_mw32(address, 0x54, 315 pm8001_mw32(address, 0x54,
277 pm8001_ha->main_cfg_tbl.lower_event_log_addr); 316 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
278 pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); 317 pm8001_mw32(address, 0x58,
279 pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); 318 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
319 pm8001_mw32(address, 0x5C,
320 pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
280 pm8001_mw32(address, 0x60, 321 pm8001_mw32(address, 0x60,
281 pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); 322 pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
282 pm8001_mw32(address, 0x64, 323 pm8001_mw32(address, 0x64,
283 pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); 324 pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
284 pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); 325 pm8001_mw32(address, 0x68,
326 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
285 pm8001_mw32(address, 0x6C, 327 pm8001_mw32(address, 0x6C,
286 pm8001_ha->main_cfg_tbl.iop_event_log_option); 328 pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
287 pm8001_mw32(address, 0x70, 329 pm8001_mw32(address, 0x70,
288 pm8001_ha->main_cfg_tbl.fatal_err_interrupt); 330 pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
289} 331}
290 332
291/** 333/**
@@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
597 */ 639 */
598static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) 640static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
599{ 641{
642 u8 i = 0;
643 u16 deviceid;
644 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
645 /* 8081 controllers need BAR shift to access MPI space
646 * as this is shared with BIOS data */
647 if (deviceid == 0x8081) {
648 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
649 PM8001_FAIL_DBG(pm8001_ha,
650 pm8001_printk("Shift Bar4 to 0x%x failed\n",
651 GSM_SM_BASE));
652 return -1;
653 }
654 }
600 /* check the firmware status */ 655 /* check the firmware status */
601 if (-1 == check_fw_ready(pm8001_ha)) { 656 if (-1 == check_fw_ready(pm8001_ha)) {
602 PM8001_FAIL_DBG(pm8001_ha, 657 PM8001_FAIL_DBG(pm8001_ha,
@@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
613 read_outbnd_queue_table(pm8001_ha); 668 read_outbnd_queue_table(pm8001_ha);
614 /* update main config table ,inbound table and outbound table */ 669 /* update main config table ,inbound table and outbound table */
615 update_main_config_table(pm8001_ha); 670 update_main_config_table(pm8001_ha);
616 update_inbnd_queue_table(pm8001_ha, 0); 671 for (i = 0; i < PM8001_MAX_INB_NUM; i++)
617 update_outbnd_queue_table(pm8001_ha, 0); 672 update_inbnd_queue_table(pm8001_ha, i);
618 mpi_set_phys_g3_with_ssc(pm8001_ha, 0); 673 for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
619 /* 7->130ms, 34->500ms, 119->1.5s */ 674 update_outbnd_queue_table(pm8001_ha, i);
620 mpi_set_open_retry_interval_reg(pm8001_ha, 119); 675 /* 8081 controller donot require these operations */
676 if (deviceid != 0x8081) {
677 mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
678 /* 7->130ms, 34->500ms, 119->1.5s */
679 mpi_set_open_retry_interval_reg(pm8001_ha, 119);
680 }
621 /* notify firmware update finished and check initialization status */ 681 /* notify firmware update finished and check initialization status */
622 if (0 == mpi_init_check(pm8001_ha)) { 682 if (0 == mpi_init_check(pm8001_ha)) {
623 PM8001_INIT_DBG(pm8001_ha, 683 PM8001_INIT_DBG(pm8001_ha,
@@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
639 u32 max_wait_count; 699 u32 max_wait_count;
640 u32 value; 700 u32 value;
641 u32 gst_len_mpistate; 701 u32 gst_len_mpistate;
702 u16 deviceid;
703 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
704 if (deviceid == 0x8081) {
705 if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
706 PM8001_FAIL_DBG(pm8001_ha,
707 pm8001_printk("Shift Bar4 to 0x%x failed\n",
708 GSM_SM_BASE));
709 return -1;
710 }
711 }
642 init_pci_device_addresses(pm8001_ha); 712 init_pci_device_addresses(pm8001_ha);
643 /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the 713 /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
644 table is stop */ 714 table is stop */
@@ -740,14 +810,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
740 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all 810 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
741 * the FW register status to the originated status. 811 * the FW register status to the originated status.
742 * @pm8001_ha: our hba card information 812 * @pm8001_ha: our hba card information
743 * @signature: signature in host scratch pad0 register.
744 */ 813 */
745static int 814static int
746pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) 815pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
747{ 816{
748 u32 regVal, toggleVal; 817 u32 regVal, toggleVal;
749 u32 max_wait_count; 818 u32 max_wait_count;
750 u32 regVal1, regVal2, regVal3; 819 u32 regVal1, regVal2, regVal3;
820 u32 signature = 0x252acbcd; /* for host scratch pad0 */
751 unsigned long flags; 821 unsigned long flags;
752 822
753 /* step1: Check FW is ready for soft reset */ 823 /* step1: Check FW is ready for soft reset */
@@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
1113 * pm8001_chip_iounmap - which maped when initialized. 1183 * pm8001_chip_iounmap - which maped when initialized.
1114 * @pm8001_ha: our hba card information 1184 * @pm8001_ha: our hba card information
1115 */ 1185 */
1116static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) 1186void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
1117{ 1187{
1118 s8 bar, logical = 0; 1188 s8 bar, logical = 0;
1119 for (bar = 0; bar < 6; bar++) { 1189 for (bar = 0; bar < 6; bar++) {
@@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
1192 * @pm8001_ha: our hba card information 1262 * @pm8001_ha: our hba card information
1193 */ 1263 */
1194static void 1264static void
1195pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) 1265pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1196{ 1266{
1197#ifdef PM8001_USE_MSIX 1267#ifdef PM8001_USE_MSIX
1198 pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); 1268 pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
@@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
1207 * @pm8001_ha: our hba card information 1277 * @pm8001_ha: our hba card information
1208 */ 1278 */
1209static void 1279static void
1210pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) 1280pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1211{ 1281{
1212#ifdef PM8001_USE_MSIX 1282#ifdef PM8001_USE_MSIX
1213 pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); 1283 pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
@@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
1218} 1288}
1219 1289
1220/** 1290/**
1221 * mpi_msg_free_get- get the free message buffer for transfer inbound queue. 1291 * pm8001_mpi_msg_free_get - get the free message buffer for transfer
1292 * inbound queue.
1222 * @circularQ: the inbound queue we want to transfer to HBA. 1293 * @circularQ: the inbound queue we want to transfer to HBA.
1223 * @messageSize: the message size of this transfer, normally it is 64 bytes 1294 * @messageSize: the message size of this transfer, normally it is 64 bytes
1224 * @messagePtr: the pointer to message. 1295 * @messagePtr: the pointer to message.
1225 */ 1296 */
1226static int mpi_msg_free_get(struct inbound_queue_table *circularQ, 1297int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
1227 u16 messageSize, void **messagePtr) 1298 u16 messageSize, void **messagePtr)
1228{ 1299{
1229 u32 offset, consumer_index; 1300 u32 offset, consumer_index;
@@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1231 u8 bcCount = 1; /* only support single buffer */ 1302 u8 bcCount = 1; /* only support single buffer */
1232 1303
1233 /* Checks is the requested message size can be allocated in this queue*/ 1304 /* Checks is the requested message size can be allocated in this queue*/
1234 if (messageSize > 64) { 1305 if (messageSize > IOMB_SIZE_SPCV) {
1235 *messagePtr = NULL; 1306 *messagePtr = NULL;
1236 return -1; 1307 return -1;
1237 } 1308 }
@@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1245 return -1; 1316 return -1;
1246 } 1317 }
1247 /* get memory IOMB buffer address */ 1318 /* get memory IOMB buffer address */
1248 offset = circularQ->producer_idx * 64; 1319 offset = circularQ->producer_idx * messageSize;
1249 /* increment to next bcCount element */ 1320 /* increment to next bcCount element */
1250 circularQ->producer_idx = (circularQ->producer_idx + bcCount) 1321 circularQ->producer_idx = (circularQ->producer_idx + bcCount)
1251 % PM8001_MPI_QUEUE; 1322 % PM8001_MPI_QUEUE;
@@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
1257} 1328}
1258 1329
1259/** 1330/**
1260 * mpi_build_cmd- build the message queue for transfer, update the PI to FW 1331 * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to
1261 * to tell the fw to get this message from IOMB. 1332 * FW to tell the fw to get this message from IOMB.
1262 * @pm8001_ha: our hba card information 1333 * @pm8001_ha: our hba card information
1263 * @circularQ: the inbound queue we want to transfer to HBA. 1334 * @circularQ: the inbound queue we want to transfer to HBA.
1264 * @opCode: the operation code represents commands which LLDD and fw recognized. 1335 * @opCode: the operation code represents commands which LLDD and fw recognized.
1265 * @payload: the command payload of each operation command. 1336 * @payload: the command payload of each operation command.
1266 */ 1337 */
1267static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, 1338int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
1268 struct inbound_queue_table *circularQ, 1339 struct inbound_queue_table *circularQ,
1269 u32 opCode, void *payload) 1340 u32 opCode, void *payload, u32 responseQueue)
1270{ 1341{
1271 u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; 1342 u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
1272 u32 responseQueue = 0;
1273 void *pMessage; 1343 void *pMessage;
1274 1344
1275 if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { 1345 if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
1346 &pMessage) < 0) {
1276 PM8001_IO_DBG(pm8001_ha, 1347 PM8001_IO_DBG(pm8001_ha,
1277 pm8001_printk("No free mpi buffer\n")); 1348 pm8001_printk("No free mpi buffer\n"));
1278 return -1; 1349 return -1;
1279 } 1350 }
1280 BUG_ON(!payload); 1351 BUG_ON(!payload);
1281 /*Copy to the payload*/ 1352 /*Copy to the payload*/
1282 memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); 1353 memcpy(pMessage, payload, (pm8001_ha->iomb_size -
1354 sizeof(struct mpi_msg_hdr)));
1283 1355
1284 /*Build the header*/ 1356 /*Build the header*/
1285 Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) 1357 Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
1291 pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, 1363 pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
1292 circularQ->pi_offset, circularQ->producer_idx); 1364 circularQ->pi_offset, circularQ->producer_idx);
1293 PM8001_IO_DBG(pm8001_ha, 1365 PM8001_IO_DBG(pm8001_ha,
1294 pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, 1366 pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
1295 circularQ->consumer_index)); 1367 responseQueue, opCode, circularQ->producer_idx,
1368 circularQ->consumer_index));
1296 return 0; 1369 return 0;
1297} 1370}
1298 1371
1299static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, 1372u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1300 struct outbound_queue_table *circularQ, u8 bc) 1373 struct outbound_queue_table *circularQ, u8 bc)
1301{ 1374{
1302 u32 producer_index; 1375 u32 producer_index;
@@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1305 1378
1306 msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); 1379 msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
1307 pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + 1380 pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
1308 circularQ->consumer_idx * 64); 1381 circularQ->consumer_idx * pm8001_ha->iomb_size);
1309 if (pOutBoundMsgHeader != msgHeader) { 1382 if (pOutBoundMsgHeader != msgHeader) {
1310 PM8001_FAIL_DBG(pm8001_ha, 1383 PM8001_FAIL_DBG(pm8001_ha,
1311 pm8001_printk("consumer_idx = %d msgHeader = %p\n", 1384 pm8001_printk("consumer_idx = %d msgHeader = %p\n",
@@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
1336} 1409}
1337 1410
1338/** 1411/**
1339 * mpi_msg_consume- get the MPI message from outbound queue message table. 1412 * pm8001_mpi_msg_consume- get the MPI message from outbound queue
1413 * message table.
1340 * @pm8001_ha: our hba card information 1414 * @pm8001_ha: our hba card information
1341 * @circularQ: the outbound queue table. 1415 * @circularQ: the outbound queue table.
1342 * @messagePtr1: the message contents of this outbound message. 1416 * @messagePtr1: the message contents of this outbound message.
1343 * @pBC: the message size. 1417 * @pBC: the message size.
1344 */ 1418 */
1345static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, 1419u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1346 struct outbound_queue_table *circularQ, 1420 struct outbound_queue_table *circularQ,
1347 void **messagePtr1, u8 *pBC) 1421 void **messagePtr1, u8 *pBC)
1348{ 1422{
@@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1356 /*Get the pointer to the circular queue buffer element*/ 1430 /*Get the pointer to the circular queue buffer element*/
1357 msgHeader = (struct mpi_msg_hdr *) 1431 msgHeader = (struct mpi_msg_hdr *)
1358 (circularQ->base_virt + 1432 (circularQ->base_virt +
1359 circularQ->consumer_idx * 64); 1433 circularQ->consumer_idx * pm8001_ha->iomb_size);
1360 /* read header */ 1434 /* read header */
1361 header_tmp = pm8001_read_32(msgHeader); 1435 header_tmp = pm8001_read_32(msgHeader);
1362 msgHeader_tmp = cpu_to_le32(header_tmp); 1436 msgHeader_tmp = cpu_to_le32(header_tmp);
@@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
1416 return MPI_IO_STATUS_BUSY; 1490 return MPI_IO_STATUS_BUSY;
1417} 1491}
1418 1492
1419static void pm8001_work_fn(struct work_struct *work) 1493void pm8001_work_fn(struct work_struct *work)
1420{ 1494{
1421 struct pm8001_work *pw = container_of(work, struct pm8001_work, work); 1495 struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
1422 struct pm8001_device *pm8001_dev; 1496 struct pm8001_device *pm8001_dev;
@@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work)
1431 pm8001_dev = pw->data; /* Most stash device structure */ 1505 pm8001_dev = pw->data; /* Most stash device structure */
1432 if ((pm8001_dev == NULL) 1506 if ((pm8001_dev == NULL)
1433 || ((pw->handler != IO_XFER_ERROR_BREAK) 1507 || ((pw->handler != IO_XFER_ERROR_BREAK)
1434 && (pm8001_dev->dev_type == NO_DEVICE))) { 1508 && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
1435 kfree(pw); 1509 kfree(pw);
1436 return; 1510 return;
1437 } 1511 }
@@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work)
1596 } break; 1670 } break;
1597 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 1671 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1598 dev = pm8001_dev->sas_device; 1672 dev = pm8001_dev->sas_device;
1599 pm8001_I_T_nexus_reset(dev); 1673 pm8001_I_T_nexus_event_handler(dev);
1600 break; 1674 break;
1601 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: 1675 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
1602 dev = pm8001_dev->sas_device; 1676 dev = pm8001_dev->sas_device;
@@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work)
1614 kfree(pw); 1688 kfree(pw);
1615} 1689}
1616 1690
1617static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, 1691int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
1618 int handler) 1692 int handler)
1619{ 1693{
1620 struct pm8001_work *pw; 1694 struct pm8001_work *pw;
@@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
1633 return ret; 1707 return ret;
1634} 1708}
1635 1709
1710static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
1711 struct pm8001_device *pm8001_ha_dev)
1712{
1713 int res;
1714 u32 ccb_tag;
1715 struct pm8001_ccb_info *ccb;
1716 struct sas_task *task = NULL;
1717 struct task_abort_req task_abort;
1718 struct inbound_queue_table *circularQ;
1719 u32 opc = OPC_INB_SATA_ABORT;
1720 int ret;
1721
1722 if (!pm8001_ha_dev) {
1723 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
1724 return;
1725 }
1726
1727 task = sas_alloc_slow_task(GFP_ATOMIC);
1728
1729 if (!task) {
1730 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
1731 "allocate task\n"));
1732 return;
1733 }
1734
1735 task->task_done = pm8001_task_done;
1736
1737 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1738 if (res)
1739 return;
1740
1741 ccb = &pm8001_ha->ccb_info[ccb_tag];
1742 ccb->device = pm8001_ha_dev;
1743 ccb->ccb_tag = ccb_tag;
1744 ccb->task = task;
1745
1746 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1747
1748 memset(&task_abort, 0, sizeof(task_abort));
1749 task_abort.abort_all = cpu_to_le32(1);
1750 task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1751 task_abort.tag = cpu_to_le32(ccb_tag);
1752
1753 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
1754
1755}
1756
1757static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
1758 struct pm8001_device *pm8001_ha_dev)
1759{
1760 struct sata_start_req sata_cmd;
1761 int res;
1762 u32 ccb_tag;
1763 struct pm8001_ccb_info *ccb;
1764 struct sas_task *task = NULL;
1765 struct host_to_dev_fis fis;
1766 struct domain_device *dev;
1767 struct inbound_queue_table *circularQ;
1768 u32 opc = OPC_INB_SATA_HOST_OPSTART;
1769
1770 task = sas_alloc_slow_task(GFP_ATOMIC);
1771
1772 if (!task) {
1773 PM8001_FAIL_DBG(pm8001_ha,
1774 pm8001_printk("cannot allocate task !!!\n"));
1775 return;
1776 }
1777 task->task_done = pm8001_task_done;
1778
1779 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1780 if (res) {
1781 PM8001_FAIL_DBG(pm8001_ha,
1782 pm8001_printk("cannot allocate tag !!!\n"));
1783 return;
1784 }
1785
1786 /* allocate domain device by ourselves as libsas
1787 * is not going to provide any
1788 */
1789 dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
1790 if (!dev) {
1791 PM8001_FAIL_DBG(pm8001_ha,
1792 pm8001_printk("Domain device cannot be allocated\n"));
1793 sas_free_task(task);
1794 return;
1795 } else {
1796 task->dev = dev;
1797 task->dev->lldd_dev = pm8001_ha_dev;
1798 }
1799
1800 ccb = &pm8001_ha->ccb_info[ccb_tag];
1801 ccb->device = pm8001_ha_dev;
1802 ccb->ccb_tag = ccb_tag;
1803 ccb->task = task;
1804 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
1805 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
1806
1807 memset(&sata_cmd, 0, sizeof(sata_cmd));
1808 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1809
1810 /* construct read log FIS */
1811 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1812 fis.fis_type = 0x27;
1813 fis.flags = 0x80;
1814 fis.command = ATA_CMD_READ_LOG_EXT;
1815 fis.lbal = 0x10;
1816 fis.sector_count = 0x1;
1817
1818 sata_cmd.tag = cpu_to_le32(ccb_tag);
1819 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1820 sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
1821 memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
1822
1823 res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
1824
1825}
1826
1636/** 1827/**
1637 * mpi_ssp_completion- process the event that FW response to the SSP request. 1828 * mpi_ssp_completion- process the event that FW response to the SSP request.
1638 * @pm8001_ha: our hba card information 1829 * @pm8001_ha: our hba card information
@@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1867 break; 2058 break;
1868 } 2059 }
1869 PM8001_IO_DBG(pm8001_ha, 2060 PM8001_IO_DBG(pm8001_ha,
1870 pm8001_printk("scsi_status = %x \n ", 2061 pm8001_printk("scsi_status = %x\n ",
1871 psspPayload->ssp_resp_iu.status)); 2062 psspPayload->ssp_resp_iu.status));
1872 spin_lock_irqsave(&t->task_state_lock, flags); 2063 spin_lock_irqsave(&t->task_state_lock, flags);
1873 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 2064 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2096 status = le32_to_cpu(psataPayload->status); 2287 status = le32_to_cpu(psataPayload->status);
2097 tag = le32_to_cpu(psataPayload->tag); 2288 tag = le32_to_cpu(psataPayload->tag);
2098 2289
2290 if (!tag) {
2291 PM8001_FAIL_DBG(pm8001_ha,
2292 pm8001_printk("tag null\n"));
2293 return;
2294 }
2099 ccb = &pm8001_ha->ccb_info[tag]; 2295 ccb = &pm8001_ha->ccb_info[tag];
2100 param = le32_to_cpu(psataPayload->param); 2296 param = le32_to_cpu(psataPayload->param);
2101 t = ccb->task; 2297 if (ccb) {
2298 t = ccb->task;
2299 pm8001_dev = ccb->device;
2300 } else {
2301 PM8001_FAIL_DBG(pm8001_ha,
2302 pm8001_printk("ccb null\n"));
2303 return;
2304 }
2305
2306 if (t) {
2307 if (t->dev && (t->dev->lldd_dev))
2308 pm8001_dev = t->dev->lldd_dev;
2309 } else {
2310 PM8001_FAIL_DBG(pm8001_ha,
2311 pm8001_printk("task null\n"));
2312 return;
2313 }
2314
2315 if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
2316 && unlikely(!t || !t->lldd_task || !t->dev)) {
2317 PM8001_FAIL_DBG(pm8001_ha,
2318 pm8001_printk("task or dev null\n"));
2319 return;
2320 }
2321
2102 ts = &t->task_status; 2322 ts = &t->task_status;
2103 pm8001_dev = ccb->device; 2323 if (!ts) {
2104 if (status)
2105 PM8001_FAIL_DBG(pm8001_ha, 2324 PM8001_FAIL_DBG(pm8001_ha,
2106 pm8001_printk("sata IO status 0x%x\n", status)); 2325 pm8001_printk("ts null\n"));
2107 if (unlikely(!t || !t->lldd_task || !t->dev))
2108 return; 2326 return;
2327 }
2109 2328
2110 switch (status) { 2329 switch (status) {
2111 case IO_SUCCESS: 2330 case IO_SUCCESS:
@@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2113 if (param == 0) { 2332 if (param == 0) {
2114 ts->resp = SAS_TASK_COMPLETE; 2333 ts->resp = SAS_TASK_COMPLETE;
2115 ts->stat = SAM_STAT_GOOD; 2334 ts->stat = SAM_STAT_GOOD;
2335 /* check if response is for SEND READ LOG */
2336 if (pm8001_dev &&
2337 (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
2338 /* set new bit for abort_all */
2339 pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
2340 /* clear bit for read log */
2341 pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
2342 pm8001_send_abort_all(pm8001_ha, pm8001_dev);
2343 /* Free the tag */
2344 pm8001_tag_free(pm8001_ha, tag);
2345 sas_free_task(t);
2346 return;
2347 }
2116 } else { 2348 } else {
2117 u8 len; 2349 u8 len;
2118 ts->resp = SAS_TASK_COMPLETE; 2350 ts->resp = SAS_TASK_COMPLETE;
@@ -2424,6 +2656,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2424 unsigned long flags; 2656 unsigned long flags;
2425 2657
2426 ccb = &pm8001_ha->ccb_info[tag]; 2658 ccb = &pm8001_ha->ccb_info[tag];
2659
2660 if (ccb) {
2661 t = ccb->task;
2662 pm8001_dev = ccb->device;
2663 } else {
2664 PM8001_FAIL_DBG(pm8001_ha,
2665 pm8001_printk("No CCB !!!. returning\n"));
2666 }
2667 if (event)
2668 PM8001_FAIL_DBG(pm8001_ha,
2669 pm8001_printk("SATA EVENT 0x%x\n", event));
2670
2671 /* Check if this is NCQ error */
2672 if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
2673 /* find device using device id */
2674 pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
2675 /* send read log extension */
2676 if (pm8001_dev)
2677 pm8001_send_read_log(pm8001_ha, pm8001_dev);
2678 return;
2679 }
2680
2681 ccb = &pm8001_ha->ccb_info[tag];
2427 t = ccb->task; 2682 t = ccb->task;
2428 pm8001_dev = ccb->device; 2683 pm8001_dev = ccb->device;
2429 if (event) 2684 if (event)
@@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2432 if (unlikely(!t || !t->lldd_task || !t->dev)) 2687 if (unlikely(!t || !t->lldd_task || !t->dev))
2433 return; 2688 return;
2434 ts = &t->task_status; 2689 ts = &t->task_status;
2435 PM8001_IO_DBG(pm8001_ha, 2690 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2436 pm8001_printk("port_id = %x,device_id = %x\n", 2691 "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
2437 port_id, dev_id)); 2692 port_id, dev_id, tag, event));
2438 switch (event) { 2693 switch (event) {
2439 case IO_OVERFLOW: 2694 case IO_OVERFLOW:
2440 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); 2695 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
@@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2822 } 3077 }
2823} 3078}
2824 3079
2825static void 3080void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
2826mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3081 void *piomb)
2827{ 3082{
2828 struct set_dev_state_resp *pPayload = 3083 struct set_dev_state_resp *pPayload =
2829 (struct set_dev_state_resp *)(piomb + 4); 3084 (struct set_dev_state_resp *)(piomb + 4);
@@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2843 pm8001_ccb_free(pm8001_ha, tag); 3098 pm8001_ccb_free(pm8001_ha, tag);
2844} 3099}
2845 3100
2846static void 3101void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2847mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2848{ 3102{
2849 struct get_nvm_data_resp *pPayload = 3103 struct get_nvm_data_resp *pPayload =
2850 (struct get_nvm_data_resp *)(piomb + 4); 3104 (struct get_nvm_data_resp *)(piomb + 4);
@@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2863 pm8001_ccb_free(pm8001_ha, tag); 3117 pm8001_ccb_free(pm8001_ha, tag);
2864} 3118}
2865 3119
2866static void 3120void
2867mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3121pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2868{ 3122{
2869 struct fw_control_ex *fw_control_context; 3123 struct fw_control_ex *fw_control_context;
2870 struct get_nvm_data_resp *pPayload = 3124 struct get_nvm_data_resp *pPayload =
@@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2925 pm8001_ccb_free(pm8001_ha, tag); 3179 pm8001_ccb_free(pm8001_ha, tag);
2926} 3180}
2927 3181
2928static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) 3182int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
2929{ 3183{
2930 struct local_phy_ctl_resp *pPayload = 3184 struct local_phy_ctl_resp *pPayload =
2931 (struct local_phy_ctl_resp *)(piomb + 4); 3185 (struct local_phy_ctl_resp *)(piomb + 4);
@@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
2954 * while receive a broadcast(change) primitive just tell the sas 3208 * while receive a broadcast(change) primitive just tell the sas
2955 * layer to discover the changed domain rather than the whole domain. 3209 * layer to discover the changed domain rather than the whole domain.
2956 */ 3210 */
2957static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) 3211void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
2958{ 3212{
2959 struct pm8001_phy *phy = &pm8001_ha->phy[i]; 3213 struct pm8001_phy *phy = &pm8001_ha->phy[i];
2960 struct asd_sas_phy *sas_phy = &phy->sas_phy; 3214 struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
2988} 3242}
2989 3243
2990/* Get the link rate speed */ 3244/* Get the link rate speed */
2991static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) 3245void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
2992{ 3246{
2993 struct sas_phy *sas_phy = phy->sas_phy.phy; 3247 struct sas_phy *sas_phy = phy->sas_phy.phy;
2994 3248
@@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
3025 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame 3279 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
3026 * buffer. 3280 * buffer.
3027 */ 3281 */
3028static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, 3282void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
3029 u8 *sas_addr) 3283 u8 *sas_addr)
3030{ 3284{
3031 if (phy->sas_phy.frame_rcvd[0] == 0x34 3285 if (phy->sas_phy.frame_rcvd[0] == 0x34
@@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
3067 ((phyId & 0x0F) << 4) | (port_id & 0x0F)); 3321 ((phyId & 0x0F) << 4) | (port_id & 0x0F));
3068 payload.param0 = cpu_to_le32(param0); 3322 payload.param0 = cpu_to_le32(param0);
3069 payload.param1 = cpu_to_le32(param1); 3323 payload.param1 = cpu_to_le32(param1);
3070 mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 3324 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
3071} 3325}
3072 3326
3073static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, 3327static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3112 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, 3366 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
3113 PHY_NOTIFY_ENABLE_SPINUP); 3367 PHY_NOTIFY_ENABLE_SPINUP);
3114 port->port_attached = 1; 3368 port->port_attached = 1;
3115 get_lrate_mode(phy, link_rate); 3369 pm8001_get_lrate_mode(phy, link_rate);
3116 break; 3370 break;
3117 case SAS_EDGE_EXPANDER_DEVICE: 3371 case SAS_EDGE_EXPANDER_DEVICE:
3118 PM8001_MSG_DBG(pm8001_ha, 3372 PM8001_MSG_DBG(pm8001_ha,
3119 pm8001_printk("expander device.\n")); 3373 pm8001_printk("expander device.\n"));
3120 port->port_attached = 1; 3374 port->port_attached = 1;
3121 get_lrate_mode(phy, link_rate); 3375 pm8001_get_lrate_mode(phy, link_rate);
3122 break; 3376 break;
3123 case SAS_FANOUT_EXPANDER_DEVICE: 3377 case SAS_FANOUT_EXPANDER_DEVICE:
3124 PM8001_MSG_DBG(pm8001_ha, 3378 PM8001_MSG_DBG(pm8001_ha,
3125 pm8001_printk("fanout expander device.\n")); 3379 pm8001_printk("fanout expander device.\n"));
3126 port->port_attached = 1; 3380 port->port_attached = 1;
3127 get_lrate_mode(phy, link_rate); 3381 pm8001_get_lrate_mode(phy, link_rate);
3128 break; 3382 break;
3129 default: 3383 default:
3130 PM8001_MSG_DBG(pm8001_ha, 3384 PM8001_MSG_DBG(pm8001_ha,
@@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3179 " phy id = %d\n", port_id, phy_id)); 3433 " phy id = %d\n", port_id, phy_id));
3180 port->port_state = portstate; 3434 port->port_state = portstate;
3181 port->port_attached = 1; 3435 port->port_attached = 1;
3182 get_lrate_mode(phy, link_rate); 3436 pm8001_get_lrate_mode(phy, link_rate);
3183 phy->phy_type |= PORT_TYPE_SATA; 3437 phy->phy_type |= PORT_TYPE_SATA;
3184 phy->phy_attached = 1; 3438 phy->phy_attached = 1;
3185 phy->sas_phy.oob_mode = SATA_OOB_MODE; 3439 phy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
3189 sizeof(struct dev_to_host_fis)); 3443 sizeof(struct dev_to_host_fis));
3190 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); 3444 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
3191 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; 3445 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
3192 phy->identify.device_type = SATA_DEV; 3446 phy->identify.device_type = SAS_SATA_DEV;
3193 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); 3447 pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
3194 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); 3448 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
3195 pm8001_bytes_dmaed(pm8001_ha, phy_id); 3449 pm8001_bytes_dmaed(pm8001_ha, phy_id);
@@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3260} 3514}
3261 3515
3262/** 3516/**
3263 * mpi_reg_resp -process register device ID response. 3517 * pm8001_mpi_reg_resp -process register device ID response.
3264 * @pm8001_ha: our hba card information 3518 * @pm8001_ha: our hba card information
3265 * @piomb: IO message buffer 3519 * @piomb: IO message buffer
3266 * 3520 *
@@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3269 * has assigned, from now,inter-communication with FW is no longer using the 3523 * has assigned, from now,inter-communication with FW is no longer using the
3270 * SAS address, use device ID which FW assigned. 3524 * SAS address, use device ID which FW assigned.
3271 */ 3525 */
3272static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3526int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3273{ 3527{
3274 u32 status; 3528 u32 status;
3275 u32 device_id; 3529 u32 device_id;
@@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3331 return 0; 3585 return 0;
3332} 3586}
3333 3587
3334static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3588int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3335{ 3589{
3336 u32 status; 3590 u32 status;
3337 u32 device_id; 3591 u32 device_id;
@@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3347 return 0; 3601 return 0;
3348} 3602}
3349 3603
3350static int 3604/**
3351mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3605 * fw_flash_update_resp - Response from FW for flash update command.
3606 * @pm8001_ha: our hba card information
3607 * @piomb: IO message buffer
3608 */
3609int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
3610 void *piomb)
3352{ 3611{
3353 u32 status; 3612 u32 status;
3354 struct fw_control_ex fw_control_context; 3613 struct fw_control_ex fw_control_context;
@@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3403 break; 3662 break;
3404 } 3663 }
3405 ccb->fw_control_context->fw_control->retcode = status; 3664 ccb->fw_control_context->fw_control->retcode = status;
3406 pci_free_consistent(pm8001_ha->pdev,
3407 fw_control_context.len,
3408 fw_control_context.virtAddr,
3409 fw_control_context.phys_addr);
3410 complete(pm8001_ha->nvmd_completion); 3665 complete(pm8001_ha->nvmd_completion);
3411 ccb->task = NULL; 3666 ccb->task = NULL;
3412 ccb->ccb_tag = 0xFFFFFFFF; 3667 ccb->ccb_tag = 0xFFFFFFFF;
@@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3414 return 0; 3669 return 0;
3415} 3670}
3416 3671
3417static int 3672int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
3418mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
3419{ 3673{
3420 u32 status; 3674 u32 status;
3421 int i; 3675 int i;
@@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
3431 return 0; 3685 return 0;
3432} 3686}
3433 3687
3434static int 3688int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3435mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3436{ 3689{
3437 struct sas_task *t; 3690 struct sas_task *t;
3438 struct pm8001_ccb_info *ccb; 3691 struct pm8001_ccb_info *ccb;
@@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3440 u32 status ; 3693 u32 status ;
3441 u32 tag, scp; 3694 u32 tag, scp;
3442 struct task_status_struct *ts; 3695 struct task_status_struct *ts;
3696 struct pm8001_device *pm8001_dev;
3443 3697
3444 struct task_abort_resp *pPayload = 3698 struct task_abort_resp *pPayload =
3445 (struct task_abort_resp *)(piomb + 4); 3699 (struct task_abort_resp *)(piomb + 4);
3446 3700
3447 status = le32_to_cpu(pPayload->status); 3701 status = le32_to_cpu(pPayload->status);
3448 tag = le32_to_cpu(pPayload->tag); 3702 tag = le32_to_cpu(pPayload->tag);
3703 if (!tag) {
3704 PM8001_FAIL_DBG(pm8001_ha,
3705 pm8001_printk(" TAG NULL. RETURNING !!!"));
3706 return -1;
3707 }
3708
3449 scp = le32_to_cpu(pPayload->scp); 3709 scp = le32_to_cpu(pPayload->scp);
3450 ccb = &pm8001_ha->ccb_info[tag]; 3710 ccb = &pm8001_ha->ccb_info[tag];
3451 t = ccb->task; 3711 t = ccb->task;
3452 PM8001_IO_DBG(pm8001_ha, 3712 pm8001_dev = ccb->device; /* retrieve device */
3453 pm8001_printk(" status = 0x%x\n", status)); 3713
3454 if (t == NULL) 3714 if (!t) {
3715 PM8001_FAIL_DBG(pm8001_ha,
3716 pm8001_printk(" TASK NULL. RETURNING !!!"));
3455 return -1; 3717 return -1;
3718 }
3456 ts = &t->task_status; 3719 ts = &t->task_status;
3457 if (status != 0) 3720 if (status != 0)
3458 PM8001_FAIL_DBG(pm8001_ha, 3721 PM8001_FAIL_DBG(pm8001_ha,
@@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3476 spin_unlock_irqrestore(&t->task_state_lock, flags); 3739 spin_unlock_irqrestore(&t->task_state_lock, flags);
3477 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 3740 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
3478 mb(); 3741 mb();
3479 t->task_done(t); 3742
3743 if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) {
3744 pm8001_tag_free(pm8001_ha, tag);
3745 sas_free_task(t);
3746 /* clear the flag */
3747 pm8001_dev->id &= 0xBFFFFFFF;
3748 } else
3749 t->task_done(t);
3750
3480 return 0; 3751 return 0;
3481} 3752}
3482 3753
@@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3727 case OPC_OUB_LOCAL_PHY_CNTRL: 3998 case OPC_OUB_LOCAL_PHY_CNTRL:
3728 PM8001_MSG_DBG(pm8001_ha, 3999 PM8001_MSG_DBG(pm8001_ha,
3729 pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); 4000 pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
3730 mpi_local_phy_ctl(pm8001_ha, piomb); 4001 pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
3731 break; 4002 break;
3732 case OPC_OUB_DEV_REGIST: 4003 case OPC_OUB_DEV_REGIST:
3733 PM8001_MSG_DBG(pm8001_ha, 4004 PM8001_MSG_DBG(pm8001_ha,
3734 pm8001_printk("OPC_OUB_DEV_REGIST\n")); 4005 pm8001_printk("OPC_OUB_DEV_REGIST\n"));
3735 mpi_reg_resp(pm8001_ha, piomb); 4006 pm8001_mpi_reg_resp(pm8001_ha, piomb);
3736 break; 4007 break;
3737 case OPC_OUB_DEREG_DEV: 4008 case OPC_OUB_DEREG_DEV:
3738 PM8001_MSG_DBG(pm8001_ha, 4009 PM8001_MSG_DBG(pm8001_ha,
3739 pm8001_printk("unregister the device\n")); 4010 pm8001_printk("unregister the device\n"));
3740 mpi_dereg_resp(pm8001_ha, piomb); 4011 pm8001_mpi_dereg_resp(pm8001_ha, piomb);
3741 break; 4012 break;
3742 case OPC_OUB_GET_DEV_HANDLE: 4013 case OPC_OUB_GET_DEV_HANDLE:
3743 PM8001_MSG_DBG(pm8001_ha, 4014 PM8001_MSG_DBG(pm8001_ha,
@@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3775 case OPC_OUB_FW_FLASH_UPDATE: 4046 case OPC_OUB_FW_FLASH_UPDATE:
3776 PM8001_MSG_DBG(pm8001_ha, 4047 PM8001_MSG_DBG(pm8001_ha,
3777 pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); 4048 pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
3778 mpi_fw_flash_update_resp(pm8001_ha, piomb); 4049 pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
3779 break; 4050 break;
3780 case OPC_OUB_GPIO_RESPONSE: 4051 case OPC_OUB_GPIO_RESPONSE:
3781 PM8001_MSG_DBG(pm8001_ha, 4052 PM8001_MSG_DBG(pm8001_ha,
@@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3788 case OPC_OUB_GENERAL_EVENT: 4059 case OPC_OUB_GENERAL_EVENT:
3789 PM8001_MSG_DBG(pm8001_ha, 4060 PM8001_MSG_DBG(pm8001_ha,
3790 pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); 4061 pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
3791 mpi_general_event(pm8001_ha, piomb); 4062 pm8001_mpi_general_event(pm8001_ha, piomb);
3792 break; 4063 break;
3793 case OPC_OUB_SSP_ABORT_RSP: 4064 case OPC_OUB_SSP_ABORT_RSP:
3794 PM8001_MSG_DBG(pm8001_ha, 4065 PM8001_MSG_DBG(pm8001_ha,
3795 pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); 4066 pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
3796 mpi_task_abort_resp(pm8001_ha, piomb); 4067 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3797 break; 4068 break;
3798 case OPC_OUB_SATA_ABORT_RSP: 4069 case OPC_OUB_SATA_ABORT_RSP:
3799 PM8001_MSG_DBG(pm8001_ha, 4070 PM8001_MSG_DBG(pm8001_ha,
3800 pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); 4071 pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
3801 mpi_task_abort_resp(pm8001_ha, piomb); 4072 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3802 break; 4073 break;
3803 case OPC_OUB_SAS_DIAG_MODE_START_END: 4074 case OPC_OUB_SAS_DIAG_MODE_START_END:
3804 PM8001_MSG_DBG(pm8001_ha, 4075 PM8001_MSG_DBG(pm8001_ha,
@@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3823 case OPC_OUB_SMP_ABORT_RSP: 4094 case OPC_OUB_SMP_ABORT_RSP:
3824 PM8001_MSG_DBG(pm8001_ha, 4095 PM8001_MSG_DBG(pm8001_ha,
3825 pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); 4096 pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
3826 mpi_task_abort_resp(pm8001_ha, piomb); 4097 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3827 break; 4098 break;
3828 case OPC_OUB_GET_NVMD_DATA: 4099 case OPC_OUB_GET_NVMD_DATA:
3829 PM8001_MSG_DBG(pm8001_ha, 4100 PM8001_MSG_DBG(pm8001_ha,
3830 pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); 4101 pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
3831 mpi_get_nvmd_resp(pm8001_ha, piomb); 4102 pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
3832 break; 4103 break;
3833 case OPC_OUB_SET_NVMD_DATA: 4104 case OPC_OUB_SET_NVMD_DATA:
3834 PM8001_MSG_DBG(pm8001_ha, 4105 PM8001_MSG_DBG(pm8001_ha,
3835 pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); 4106 pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
3836 mpi_set_nvmd_resp(pm8001_ha, piomb); 4107 pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
3837 break; 4108 break;
3838 case OPC_OUB_DEVICE_HANDLE_REMOVAL: 4109 case OPC_OUB_DEVICE_HANDLE_REMOVAL:
3839 PM8001_MSG_DBG(pm8001_ha, 4110 PM8001_MSG_DBG(pm8001_ha,
@@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3842 case OPC_OUB_SET_DEVICE_STATE: 4113 case OPC_OUB_SET_DEVICE_STATE:
3843 PM8001_MSG_DBG(pm8001_ha, 4114 PM8001_MSG_DBG(pm8001_ha,
3844 pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); 4115 pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
3845 mpi_set_dev_state_resp(pm8001_ha, piomb); 4116 pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
3846 break; 4117 break;
3847 case OPC_OUB_GET_DEVICE_STATE: 4118 case OPC_OUB_GET_DEVICE_STATE:
3848 PM8001_MSG_DBG(pm8001_ha, 4119 PM8001_MSG_DBG(pm8001_ha,
@@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3864 } 4135 }
3865} 4136}
3866 4137
3867static int process_oq(struct pm8001_hba_info *pm8001_ha) 4138static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
3868{ 4139{
3869 struct outbound_queue_table *circularQ; 4140 struct outbound_queue_table *circularQ;
3870 void *pMsg1 = NULL; 4141 void *pMsg1 = NULL;
@@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
3873 unsigned long flags; 4144 unsigned long flags;
3874 4145
3875 spin_lock_irqsave(&pm8001_ha->lock, flags); 4146 spin_lock_irqsave(&pm8001_ha->lock, flags);
3876 circularQ = &pm8001_ha->outbnd_q_tbl[0]; 4147 circularQ = &pm8001_ha->outbnd_q_tbl[vec];
3877 do { 4148 do {
3878 ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); 4149 ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
3879 if (MPI_IO_STATUS_SUCCESS == ret) { 4150 if (MPI_IO_STATUS_SUCCESS == ret) {
3880 /* process the outbound message */ 4151 /* process the outbound message */
3881 process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); 4152 process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
3882 /* free the message from the outbound circular buffer */ 4153 /* free the message from the outbound circular buffer */
3883 mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); 4154 pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
4155 circularQ, bc);
3884 } 4156 }
3885 if (MPI_IO_STATUS_BUSY == ret) { 4157 if (MPI_IO_STATUS_BUSY == ret) {
3886 /* Update the producer index from SPC */ 4158 /* Update the producer index from SPC */
@@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = {
3903 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ 4175 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
3904 [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ 4176 [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
3905}; 4177};
3906static void 4178void
3907pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) 4179pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
3908{ 4180{
3909 int i; 4181 int i;
@@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
3978 smp_cmd.long_smp_req.long_resp_size = 4250 smp_cmd.long_smp_req.long_resp_size =
3979 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); 4251 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
3980 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); 4252 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
3981 mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); 4253 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
3982 return 0; 4254 return 0;
3983 4255
3984err_out_2: 4256err_out_2:
@@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
4042 ssp_cmd.len = cpu_to_le32(task->total_xfer_len); 4314 ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
4043 ssp_cmd.esgl = 0; 4315 ssp_cmd.esgl = 0;
4044 } 4316 }
4045 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); 4317 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
4046 return ret; 4318 return ret;
4047} 4319}
4048 4320
@@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4060 u32 ATAP = 0x0; 4332 u32 ATAP = 0x0;
4061 u32 dir; 4333 u32 dir;
4062 struct inbound_queue_table *circularQ; 4334 struct inbound_queue_table *circularQ;
4335 unsigned long flags;
4063 u32 opc = OPC_INB_SATA_HOST_OPSTART; 4336 u32 opc = OPC_INB_SATA_HOST_OPSTART;
4064 memset(&sata_cmd, 0, sizeof(sata_cmd)); 4337 memset(&sata_cmd, 0, sizeof(sata_cmd));
4065 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4338 circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4080 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); 4353 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
4081 } 4354 }
4082 } 4355 }
4083 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) 4356 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
4357 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
4084 ncg_tag = hdr_tag; 4358 ncg_tag = hdr_tag;
4359 }
4085 dir = data_dir_flags[task->data_dir] << 8; 4360 dir = data_dir_flags[task->data_dir] << 8;
4086 sata_cmd.tag = cpu_to_le32(tag); 4361 sata_cmd.tag = cpu_to_le32(tag);
4087 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 4362 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4112 sata_cmd.len = cpu_to_le32(task->total_xfer_len); 4387 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
4113 sata_cmd.esgl = 0; 4388 sata_cmd.esgl = 0;
4114 } 4389 }
4115 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); 4390
4391 /* Check for read log for failed drive and return */
4392 if (sata_cmd.sata_fis.command == 0x2f) {
4393 if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
4394 (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
4395 (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
4396 struct task_status_struct *ts;
4397
4398 pm8001_ha_dev->id &= 0xDFFFFFFF;
4399 ts = &task->task_status;
4400
4401 spin_lock_irqsave(&task->task_state_lock, flags);
4402 ts->resp = SAS_TASK_COMPLETE;
4403 ts->stat = SAM_STAT_GOOD;
4404 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
4405 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
4406 task->task_state_flags |= SAS_TASK_STATE_DONE;
4407 if (unlikely((task->task_state_flags &
4408 SAS_TASK_STATE_ABORTED))) {
4409 spin_unlock_irqrestore(&task->task_state_lock,
4410 flags);
4411 PM8001_FAIL_DBG(pm8001_ha,
4412 pm8001_printk("task 0x%p resp 0x%x "
4413 " stat 0x%x but aborted by upper layer "
4414 "\n", task, ts->resp, ts->stat));
4415 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4416 } else if (task->uldd_task) {
4417 spin_unlock_irqrestore(&task->task_state_lock,
4418 flags);
4419 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4420 mb();/* ditto */
4421 spin_unlock_irq(&pm8001_ha->lock);
4422 task->task_done(task);
4423 spin_lock_irq(&pm8001_ha->lock);
4424 return 0;
4425 } else if (!task->uldd_task) {
4426 spin_unlock_irqrestore(&task->task_state_lock,
4427 flags);
4428 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
4429 mb();/*ditto*/
4430 spin_unlock_irq(&pm8001_ha->lock);
4431 task->task_done(task);
4432 spin_lock_irq(&pm8001_ha->lock);
4433 return 0;
4434 }
4435 }
4436 }
4437
4438 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
4116 return ret; 4439 return ret;
4117} 4440}
4118 4441
@@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
4142 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | 4465 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
4143 LINKMODE_AUTO | LINKRATE_15 | 4466 LINKMODE_AUTO | LINKRATE_15 |
4144 LINKRATE_30 | LINKRATE_60 | phy_id); 4467 LINKRATE_30 | LINKRATE_60 | phy_id);
4145 payload.sas_identify.dev_type = SAS_END_DEV; 4468 payload.sas_identify.dev_type = SAS_END_DEVICE;
4146 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; 4469 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
4147 memcpy(payload.sas_identify.sas_addr, 4470 memcpy(payload.sas_identify.sas_addr,
4148 pm8001_ha->sas_addr, SAS_ADDR_SIZE); 4471 pm8001_ha->sas_addr, SAS_ADDR_SIZE);
4149 payload.sas_identify.phy_id = phy_id; 4472 payload.sas_identify.phy_id = phy_id;
4150 ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); 4473 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
4151 return ret; 4474 return ret;
4152} 4475}
4153 4476
@@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
4157 * @num: the inbound queue number 4480 * @num: the inbound queue number
4158 * @phy_id: the phy id which we wanted to start up. 4481 * @phy_id: the phy id which we wanted to start up.
4159 */ 4482 */
4160static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, 4483int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
4161 u8 phy_id) 4484 u8 phy_id)
4162{ 4485{
4163 struct phy_stop_req payload; 4486 struct phy_stop_req payload;
@@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
4169 memset(&payload, 0, sizeof(payload)); 4492 memset(&payload, 0, sizeof(payload));
4170 payload.tag = cpu_to_le32(tag); 4493 payload.tag = cpu_to_le32(tag);
4171 payload.phy_id = cpu_to_le32(phy_id); 4494 payload.phy_id = cpu_to_le32(phy_id);
4172 ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); 4495 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
4173 return ret; 4496 return ret;
4174} 4497}
4175 4498
4176/** 4499/**
4177 * see comments on mpi_reg_resp. 4500 * see comments on pm8001_mpi_reg_resp.
4178 */ 4501 */
4179static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, 4502static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4180 struct pm8001_device *pm8001_dev, u32 flag) 4503 struct pm8001_device *pm8001_dev, u32 flag)
@@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4204 if (flag == 1) 4527 if (flag == 1)
4205 stp_sspsmp_sata = 0x02; /*direct attached sata */ 4528 stp_sspsmp_sata = 0x02; /*direct attached sata */
4206 else { 4529 else {
4207 if (pm8001_dev->dev_type == SATA_DEV) 4530 if (pm8001_dev->dev_type == SAS_SATA_DEV)
4208 stp_sspsmp_sata = 0x00; /* stp*/ 4531 stp_sspsmp_sata = 0x00; /* stp*/
4209 else if (pm8001_dev->dev_type == SAS_END_DEV || 4532 else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
4210 pm8001_dev->dev_type == EDGE_DEV || 4533 pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
4211 pm8001_dev->dev_type == FANOUT_DEV) 4534 pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
4212 stp_sspsmp_sata = 0x01; /*ssp or smp*/ 4535 stp_sspsmp_sata = 0x01; /*ssp or smp*/
4213 } 4536 }
4214 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 4537 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
@@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4228 cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); 4551 cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
4229 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, 4552 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
4230 SAS_ADDR_SIZE); 4553 SAS_ADDR_SIZE);
4231 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4554 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4232 return rc; 4555 return rc;
4233} 4556}
4234 4557
4235/** 4558/**
4236 * see comments on mpi_reg_resp. 4559 * see comments on pm8001_mpi_reg_resp.
4237 */ 4560 */
4238static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, 4561int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
4239 u32 device_id) 4562 u32 device_id)
4240{ 4563{
4241 struct dereg_dev_req payload; 4564 struct dereg_dev_req payload;
@@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
4249 payload.device_id = cpu_to_le32(device_id); 4572 payload.device_id = cpu_to_le32(device_id);
4250 PM8001_MSG_DBG(pm8001_ha, 4573 PM8001_MSG_DBG(pm8001_ha,
4251 pm8001_printk("unregister device device_id = %d\n", device_id)); 4574 pm8001_printk("unregister device device_id = %d\n", device_id));
4252 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4575 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4253 return ret; 4576 return ret;
4254} 4577}
4255 4578
@@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4272 payload.tag = cpu_to_le32(1); 4595 payload.tag = cpu_to_le32(1);
4273 payload.phyop_phyid = 4596 payload.phyop_phyid =
4274 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); 4597 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
4275 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4598 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4276 return ret; 4599 return ret;
4277} 4600}
4278 4601
@@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
4296 * @stat: stat. 4619 * @stat: stat.
4297 */ 4620 */
4298static irqreturn_t 4621static irqreturn_t
4299pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) 4622pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
4300{ 4623{
4301 pm8001_chip_interrupt_disable(pm8001_ha); 4624 pm8001_chip_interrupt_disable(pm8001_ha, vec);
4302 process_oq(pm8001_ha); 4625 process_oq(pm8001_ha, vec);
4303 pm8001_chip_interrupt_enable(pm8001_ha); 4626 pm8001_chip_interrupt_enable(pm8001_ha, vec);
4304 return IRQ_HANDLED; 4627 return IRQ_HANDLED;
4305} 4628}
4306 4629
@@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
4322 task_abort.device_id = cpu_to_le32(dev_id); 4645 task_abort.device_id = cpu_to_le32(dev_id);
4323 task_abort.tag = cpu_to_le32(cmd_tag); 4646 task_abort.tag = cpu_to_le32(cmd_tag);
4324 } 4647 }
4325 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); 4648 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
4326 return ret; 4649 return ret;
4327} 4650}
4328 4651
@@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
4331 * @task: the task we wanted to aborted. 4654 * @task: the task we wanted to aborted.
4332 * @flag: the abort flag. 4655 * @flag: the abort flag.
4333 */ 4656 */
4334static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, 4657int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
4335 struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) 4658 struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
4336{ 4659{
4337 u32 opc, device_id; 4660 u32 opc, device_id;
4338 int rc = TMF_RESP_FUNC_FAILED; 4661 int rc = TMF_RESP_FUNC_FAILED;
4339 PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" 4662 PM8001_EH_DBG(pm8001_ha,
4340 " = %x", cmd_tag, task_tag)); 4663 pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
4341 if (pm8001_dev->dev_type == SAS_END_DEV) 4664 cmd_tag, task_tag));
4665 if (pm8001_dev->dev_type == SAS_END_DEVICE)
4342 opc = OPC_INB_SSP_ABORT; 4666 opc = OPC_INB_SSP_ABORT;
4343 else if (pm8001_dev->dev_type == SATA_DEV) 4667 else if (pm8001_dev->dev_type == SAS_SATA_DEV)
4344 opc = OPC_INB_SATA_ABORT; 4668 opc = OPC_INB_SATA_ABORT;
4345 else 4669 else
4346 opc = OPC_INB_SMP_ABORT;/* SMP */ 4670 opc = OPC_INB_SMP_ABORT;/* SMP */
@@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
4358 * @ccb: the ccb information. 4682 * @ccb: the ccb information.
4359 * @tmf: task management function. 4683 * @tmf: task management function.
4360 */ 4684 */
4361static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, 4685int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
4362 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) 4686 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
4363{ 4687{
4364 struct sas_task *task = ccb->task; 4688 struct sas_task *task = ccb->task;
@@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
4376 memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); 4700 memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
4377 sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); 4701 sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
4378 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4702 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4379 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); 4703 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
4380 return ret; 4704 return ret;
4381} 4705}
4382 4706
4383static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, 4707int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4384 void *payload) 4708 void *payload)
4385{ 4709{
4386 u32 opc = OPC_INB_GET_NVMD_DATA; 4710 u32 opc = OPC_INB_GET_NVMD_DATA;
@@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4397 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4721 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4398 if (!fw_control_context) 4722 if (!fw_control_context)
4399 return -ENOMEM; 4723 return -ENOMEM;
4400 fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; 4724 fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
4401 fw_control_context->len = ioctl_payload->length; 4725 fw_control_context->len = ioctl_payload->length;
4402 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4726 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4403 memset(&nvmd_req, 0, sizeof(nvmd_req)); 4727 memset(&nvmd_req, 0, sizeof(nvmd_req));
@@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4456 default: 4780 default:
4457 break; 4781 break;
4458 } 4782 }
4459 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); 4783 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
4460 return rc; 4784 return rc;
4461} 4785}
4462 4786
4463static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, 4787int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4464 void *payload) 4788 void *payload)
4465{ 4789{
4466 u32 opc = OPC_INB_SET_NVMD_DATA; 4790 u32 opc = OPC_INB_SET_NVMD_DATA;
@@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4479 return -ENOMEM; 4803 return -ENOMEM;
4480 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4804 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4481 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, 4805 memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
4482 ioctl_payload->func_specific, 4806 &ioctl_payload->func_specific,
4483 ioctl_payload->length); 4807 ioctl_payload->length);
4484 memset(&nvmd_req, 0, sizeof(nvmd_req)); 4808 memset(&nvmd_req, 0, sizeof(nvmd_req));
4485 rc = pm8001_tag_alloc(pm8001_ha, &tag); 4809 rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4536 default: 4860 default:
4537 break; 4861 break;
4538 } 4862 }
4539 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); 4863 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
4540 return rc; 4864 return rc;
4541} 4865}
4542 4866
@@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
4545 * @pm8001_ha: our hba card information. 4869 * @pm8001_ha: our hba card information.
4546 * @fw_flash_updata_info: firmware flash update param 4870 * @fw_flash_updata_info: firmware flash update param
4547 */ 4871 */
4548static int 4872int
4549pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, 4873pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
4550 void *fw_flash_updata_info, u32 tag) 4874 void *fw_flash_updata_info, u32 tag)
4551{ 4875{
@@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
4567 cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); 4891 cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
4568 payload.sgl_addr_hi = 4892 payload.sgl_addr_hi =
4569 cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); 4893 cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
4570 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4894 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4571 return ret; 4895 return ret;
4572} 4896}
4573 4897
4574static int 4898int
4575pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, 4899pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4576 void *payload) 4900 void *payload)
4577{ 4901{
@@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4581 int rc; 4905 int rc;
4582 u32 tag; 4906 u32 tag;
4583 struct pm8001_ccb_info *ccb; 4907 struct pm8001_ccb_info *ccb;
4584 void *buffer = NULL; 4908 void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
4585 dma_addr_t phys_addr; 4909 dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
4586 u32 phys_addr_hi;
4587 u32 phys_addr_lo;
4588 struct pm8001_ioctl_payload *ioctl_payload = payload; 4910 struct pm8001_ioctl_payload *ioctl_payload = payload;
4589 4911
4590 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); 4912 fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
4591 if (!fw_control_context) 4913 if (!fw_control_context)
4592 return -ENOMEM; 4914 return -ENOMEM;
4593 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; 4915 fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
4594 if (fw_control->len != 0) {
4595 if (pm8001_mem_alloc(pm8001_ha->pdev,
4596 (void **)&buffer,
4597 &phys_addr,
4598 &phys_addr_hi,
4599 &phys_addr_lo,
4600 fw_control->len, 0) != 0) {
4601 PM8001_FAIL_DBG(pm8001_ha,
4602 pm8001_printk("Mem alloc failure\n"));
4603 kfree(fw_control_context);
4604 return -ENOMEM;
4605 }
4606 }
4607 memcpy(buffer, fw_control->buffer, fw_control->len); 4916 memcpy(buffer, fw_control->buffer, fw_control->len);
4608 flash_update_info.sgl.addr = cpu_to_le64(phys_addr); 4917 flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
4609 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); 4918 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4613 flash_update_info.total_image_len = fw_control->size; 4922 flash_update_info.total_image_len = fw_control->size;
4614 fw_control_context->fw_control = fw_control; 4923 fw_control_context->fw_control = fw_control;
4615 fw_control_context->virtAddr = buffer; 4924 fw_control_context->virtAddr = buffer;
4925 fw_control_context->phys_addr = phys_addr;
4616 fw_control_context->len = fw_control->len; 4926 fw_control_context->len = fw_control->len;
4617 rc = pm8001_tag_alloc(pm8001_ha, &tag); 4927 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4618 if (rc) { 4928 if (rc) {
@@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4627 return rc; 4937 return rc;
4628} 4938}
4629 4939
4630static int 4940int
4631pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, 4941pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
4632 struct pm8001_device *pm8001_dev, u32 state) 4942 struct pm8001_device *pm8001_dev, u32 state)
4633{ 4943{
@@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
4648 payload.tag = cpu_to_le32(tag); 4958 payload.tag = cpu_to_le32(tag);
4649 payload.device_id = cpu_to_le32(pm8001_dev->device_id); 4959 payload.device_id = cpu_to_le32(pm8001_dev->device_id);
4650 payload.nds = cpu_to_le32(state); 4960 payload.nds = cpu_to_le32(state);
4651 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4961 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4652 return rc; 4962 return rc;
4653 4963
4654} 4964}
@@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
4673 payload.SSAHOLT = cpu_to_le32(0xd << 25); 4983 payload.SSAHOLT = cpu_to_le32(0xd << 25);
4674 payload.sata_hol_tmo = cpu_to_le32(80); 4984 payload.sata_hol_tmo = cpu_to_le32(80);
4675 payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); 4985 payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
4676 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4986 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4677 return rc; 4987 return rc;
4678 4988
4679} 4989}
@@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
4706 .set_dev_state_req = pm8001_chip_set_dev_state_req, 5016 .set_dev_state_req = pm8001_chip_set_dev_state_req,
4707 .sas_re_init_req = pm8001_chip_sas_re_initialization, 5017 .sas_re_init_req = pm8001_chip_sas_re_initialization,
4708}; 5018};
4709
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index d437309cb1e1..d7c1e2034226 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,6 +131,8 @@
131#define LINKRATE_30 (0x02 << 8) 131#define LINKRATE_30 (0x02 << 8)
132#define LINKRATE_60 (0x04 << 8) 132#define LINKRATE_60 (0x04 << 8)
133 133
134/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
135#define GSM_SM_BASE 0x4F0000
134struct mpi_msg_hdr{ 136struct mpi_msg_hdr{
135 __le32 header; /* Bits [11:0] - Message operation code */ 137 __le32 header; /* Bits [11:0] - Message operation code */
136 /* Bits [15:12] - Message Category */ 138 /* Bits [15:12] - Message Category */
@@ -298,7 +300,7 @@ struct local_phy_ctl_resp {
298 300
299 301
300#define OP_BITS 0x0000FF00 302#define OP_BITS 0x0000FF00
301#define ID_BITS 0x0000000F 303#define ID_BITS 0x000000FF
302 304
303/* 305/*
304 * brief the data structure of PORT Control Command 306 * brief the data structure of PORT Control Command
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3d5e522e00fc..e4b9bc7f5410 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -44,8 +44,16 @@
44 44
45static struct scsi_transport_template *pm8001_stt; 45static struct scsi_transport_template *pm8001_stt;
46 46
47/**
48 * chip info structure to identify chip key functionality as
49 * encryption available/not, no of ports, hw specific function ref
50 */
47static const struct pm8001_chip_info pm8001_chips[] = { 51static const struct pm8001_chip_info pm8001_chips[] = {
48 [chip_8001] = { 8, &pm8001_8001_dispatch,}, 52 [chip_8001] = {0, 8, &pm8001_8001_dispatch,},
53 [chip_8008] = {0, 8, &pm8001_80xx_dispatch,},
54 [chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
55 [chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
56 [chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
49}; 57};
50static int pm8001_id; 58static int pm8001_id;
51 59
@@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
155} 163}
156 164
157#ifdef PM8001_USE_TASKLET 165#ifdef PM8001_USE_TASKLET
166
167/**
168 * tasklet for 64 msi-x interrupt handler
169 * @opaque: the passed general host adapter struct
170 * Note: pm8001_tasklet is common for pm8001 & pm80xx
171 */
158static void pm8001_tasklet(unsigned long opaque) 172static void pm8001_tasklet(unsigned long opaque)
159{ 173{
160 struct pm8001_hba_info *pm8001_ha; 174 struct pm8001_hba_info *pm8001_ha;
175 u32 vec;
161 pm8001_ha = (struct pm8001_hba_info *)opaque; 176 pm8001_ha = (struct pm8001_hba_info *)opaque;
162 if (unlikely(!pm8001_ha)) 177 if (unlikely(!pm8001_ha))
163 BUG_ON(1); 178 BUG_ON(1);
164 PM8001_CHIP_DISP->isr(pm8001_ha); 179 vec = pm8001_ha->int_vector;
180 PM8001_CHIP_DISP->isr(pm8001_ha, vec);
181}
182#endif
183
184static struct pm8001_hba_info *outq_to_hba(u8 *outq)
185{
186 return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
165} 187}
188
189/**
190 * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
191 * It obtains the vector number and calls the equivalent bottom
192 * half or services directly.
193 * @opaque: the passed outbound queue/vector. Host structure is
194 * retrieved from the same.
195 */
196static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
197{
198 struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
199 u8 outq = *(u8 *)opaque;
200 irqreturn_t ret = IRQ_HANDLED;
201 if (unlikely(!pm8001_ha))
202 return IRQ_NONE;
203 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
204 return IRQ_NONE;
205 pm8001_ha->int_vector = outq;
206#ifdef PM8001_USE_TASKLET
207 tasklet_schedule(&pm8001_ha->tasklet);
208#else
209 ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
166#endif 210#endif
211 return ret;
212}
167 213
214/**
215 * pm8001_interrupt_handler_intx - main INTx interrupt handler.
216 * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure.
217 */
168 218
169 /** 219static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
170 * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
171 * dispatcher to handle each case.
172 * @irq: irq number.
173 * @opaque: the passed general host adapter struct
174 */
175static irqreturn_t pm8001_interrupt(int irq, void *opaque)
176{ 220{
177 struct pm8001_hba_info *pm8001_ha; 221 struct pm8001_hba_info *pm8001_ha;
178 irqreturn_t ret = IRQ_HANDLED; 222 irqreturn_t ret = IRQ_HANDLED;
179 struct sas_ha_struct *sha = opaque; 223 struct sas_ha_struct *sha = dev_id;
180 pm8001_ha = sha->lldd_ha; 224 pm8001_ha = sha->lldd_ha;
181 if (unlikely(!pm8001_ha)) 225 if (unlikely(!pm8001_ha))
182 return IRQ_NONE; 226 return IRQ_NONE;
183 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) 227 if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
184 return IRQ_NONE; 228 return IRQ_NONE;
229
230 pm8001_ha->int_vector = 0;
185#ifdef PM8001_USE_TASKLET 231#ifdef PM8001_USE_TASKLET
186 tasklet_schedule(&pm8001_ha->tasklet); 232 tasklet_schedule(&pm8001_ha->tasklet);
187#else 233#else
188 ret = PM8001_CHIP_DISP->isr(pm8001_ha); 234 ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
189#endif 235#endif
190 return ret; 236 return ret;
191} 237}
@@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
195 * @pm8001_ha:our hba structure. 241 * @pm8001_ha:our hba structure.
196 * 242 *
197 */ 243 */
198static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) 244static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
245 const struct pci_device_id *ent)
199{ 246{
200 int i; 247 int i;
201 spin_lock_init(&pm8001_ha->lock); 248 spin_lock_init(&pm8001_ha->lock);
249 PM8001_INIT_DBG(pm8001_ha,
250 pm8001_printk("pm8001_alloc: PHY:%x\n",
251 pm8001_ha->chip->n_phy));
202 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 252 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
203 pm8001_phy_init(pm8001_ha, i); 253 pm8001_phy_init(pm8001_ha, i);
204 pm8001_ha->port[i].wide_port_phymap = 0; 254 pm8001_ha->port[i].wide_port_phymap = 0;
@@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
222 pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; 272 pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
223 pm8001_ha->memoryMap.region[IOP].alignment = 32; 273 pm8001_ha->memoryMap.region[IOP].alignment = 32;
224 274
225 /* MPI Memory region 3 for consumer Index of inbound queues */ 275 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
226 pm8001_ha->memoryMap.region[CI].num_elements = 1; 276 /* MPI Memory region 3 for consumer Index of inbound queues */
227 pm8001_ha->memoryMap.region[CI].element_size = 4; 277 pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
228 pm8001_ha->memoryMap.region[CI].total_len = 4; 278 pm8001_ha->memoryMap.region[CI+i].element_size = 4;
229 pm8001_ha->memoryMap.region[CI].alignment = 4; 279 pm8001_ha->memoryMap.region[CI+i].total_len = 4;
230 280 pm8001_ha->memoryMap.region[CI+i].alignment = 4;
231 /* MPI Memory region 4 for producer Index of outbound queues */ 281
232 pm8001_ha->memoryMap.region[PI].num_elements = 1; 282 if ((ent->driver_data) != chip_8001) {
233 pm8001_ha->memoryMap.region[PI].element_size = 4; 283 /* MPI Memory region 5 inbound queues */
234 pm8001_ha->memoryMap.region[PI].total_len = 4; 284 pm8001_ha->memoryMap.region[IB+i].num_elements =
235 pm8001_ha->memoryMap.region[PI].alignment = 4; 285 PM8001_MPI_QUEUE;
236 286 pm8001_ha->memoryMap.region[IB+i].element_size = 128;
237 /* MPI Memory region 5 inbound queues */ 287 pm8001_ha->memoryMap.region[IB+i].total_len =
238 pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE; 288 PM8001_MPI_QUEUE * 128;
239 pm8001_ha->memoryMap.region[IB].element_size = 64; 289 pm8001_ha->memoryMap.region[IB+i].alignment = 128;
240 pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64; 290 } else {
241 pm8001_ha->memoryMap.region[IB].alignment = 64; 291 pm8001_ha->memoryMap.region[IB+i].num_elements =
242 292 PM8001_MPI_QUEUE;
243 /* MPI Memory region 6 outbound queues */ 293 pm8001_ha->memoryMap.region[IB+i].element_size = 64;
244 pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE; 294 pm8001_ha->memoryMap.region[IB+i].total_len =
245 pm8001_ha->memoryMap.region[OB].element_size = 64; 295 PM8001_MPI_QUEUE * 64;
246 pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64; 296 pm8001_ha->memoryMap.region[IB+i].alignment = 64;
247 pm8001_ha->memoryMap.region[OB].alignment = 64; 297 }
298 }
299
300 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
301 /* MPI Memory region 4 for producer Index of outbound queues */
302 pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
303 pm8001_ha->memoryMap.region[PI+i].element_size = 4;
304 pm8001_ha->memoryMap.region[PI+i].total_len = 4;
305 pm8001_ha->memoryMap.region[PI+i].alignment = 4;
306
307 if (ent->driver_data != chip_8001) {
308 /* MPI Memory region 6 Outbound queues */
309 pm8001_ha->memoryMap.region[OB+i].num_elements =
310 PM8001_MPI_QUEUE;
311 pm8001_ha->memoryMap.region[OB+i].element_size = 128;
312 pm8001_ha->memoryMap.region[OB+i].total_len =
313 PM8001_MPI_QUEUE * 128;
314 pm8001_ha->memoryMap.region[OB+i].alignment = 128;
315 } else {
316 /* MPI Memory region 6 Outbound queues */
317 pm8001_ha->memoryMap.region[OB+i].num_elements =
318 PM8001_MPI_QUEUE;
319 pm8001_ha->memoryMap.region[OB+i].element_size = 64;
320 pm8001_ha->memoryMap.region[OB+i].total_len =
321 PM8001_MPI_QUEUE * 64;
322 pm8001_ha->memoryMap.region[OB+i].alignment = 64;
323 }
248 324
325 }
249 /* Memory region write DMA*/ 326 /* Memory region write DMA*/
250 pm8001_ha->memoryMap.region[NVMD].num_elements = 1; 327 pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
251 pm8001_ha->memoryMap.region[NVMD].element_size = 4096; 328 pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
@@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
264 pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * 341 pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
265 sizeof(struct pm8001_ccb_info); 342 sizeof(struct pm8001_ccb_info);
266 343
344 /* Memory region for fw flash */
345 pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
346
267 for (i = 0; i < USI_MAX_MEMCNT; i++) { 347 for (i = 0; i < USI_MAX_MEMCNT; i++) {
268 if (pm8001_mem_alloc(pm8001_ha->pdev, 348 if (pm8001_mem_alloc(pm8001_ha->pdev,
269 &pm8001_ha->memoryMap.region[i].virt_ptr, 349 &pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
281 361
282 pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; 362 pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
283 for (i = 0; i < PM8001_MAX_DEVICES; i++) { 363 for (i = 0; i < PM8001_MAX_DEVICES; i++) {
284 pm8001_ha->devices[i].dev_type = NO_DEVICE; 364 pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
285 pm8001_ha->devices[i].id = i; 365 pm8001_ha->devices[i].id = i;
286 pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; 366 pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
287 pm8001_ha->devices[i].running_req = 0; 367 pm8001_ha->devices[i].running_req = 0;
@@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
339 ioremap(pm8001_ha->io_mem[logicalBar].membase, 419 ioremap(pm8001_ha->io_mem[logicalBar].membase,
340 pm8001_ha->io_mem[logicalBar].memsize); 420 pm8001_ha->io_mem[logicalBar].memsize);
341 PM8001_INIT_DBG(pm8001_ha, 421 PM8001_INIT_DBG(pm8001_ha,
342 pm8001_printk("PCI: bar %d, logicalBar %d " 422 pm8001_printk("PCI: bar %d, logicalBar %d ",
343 "virt_addr=%lx,len=%d\n", bar, logicalBar, 423 bar, logicalBar));
344 (unsigned long) 424 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
345 pm8001_ha->io_mem[logicalBar].memvirtaddr, 425 "base addr %llx virt_addr=%llx len=%d\n",
426 (u64)pm8001_ha->io_mem[logicalBar].membase,
427 (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
346 pm8001_ha->io_mem[logicalBar].memsize)); 428 pm8001_ha->io_mem[logicalBar].memsize));
347 } else { 429 } else {
348 pm8001_ha->io_mem[logicalBar].membase = 0; 430 pm8001_ha->io_mem[logicalBar].membase = 0;
@@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
361 * @shost: scsi host struct which has been initialized before. 443 * @shost: scsi host struct which has been initialized before.
362 */ 444 */
363static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, 445static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
364 u32 chip_id, 446 const struct pci_device_id *ent,
365 struct Scsi_Host *shost) 447 struct Scsi_Host *shost)
448
366{ 449{
367 struct pm8001_hba_info *pm8001_ha; 450 struct pm8001_hba_info *pm8001_ha;
368 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 451 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
374 457
375 pm8001_ha->pdev = pdev; 458 pm8001_ha->pdev = pdev;
376 pm8001_ha->dev = &pdev->dev; 459 pm8001_ha->dev = &pdev->dev;
377 pm8001_ha->chip_id = chip_id; 460 pm8001_ha->chip_id = ent->driver_data;
378 pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; 461 pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
379 pm8001_ha->irq = pdev->irq; 462 pm8001_ha->irq = pdev->irq;
380 pm8001_ha->sas = sha; 463 pm8001_ha->sas = sha;
@@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
382 pm8001_ha->id = pm8001_id++; 465 pm8001_ha->id = pm8001_id++;
383 pm8001_ha->logging_level = 0x01; 466 pm8001_ha->logging_level = 0x01;
384 sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); 467 sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
468 /* IOMB size is 128 for 8088/89 controllers */
469 if (pm8001_ha->chip_id != chip_8001)
470 pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
471 else
472 pm8001_ha->iomb_size = IOMB_SIZE_SPC;
473
385#ifdef PM8001_USE_TASKLET 474#ifdef PM8001_USE_TASKLET
475 /**
476 * default tasklet for non msi-x interrupt handler/first msi-x
477 * interrupt handler
478 **/
386 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, 479 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
387 (unsigned long)pm8001_ha); 480 (unsigned long)pm8001_ha);
388#endif 481#endif
389 pm8001_ioremap(pm8001_ha); 482 pm8001_ioremap(pm8001_ha);
390 if (!pm8001_alloc(pm8001_ha)) 483 if (!pm8001_alloc(pm8001_ha, ent))
391 return pm8001_ha; 484 return pm8001_ha;
392 pm8001_free(pm8001_ha); 485 pm8001_free(pm8001_ha);
393 return NULL; 486 return NULL;
@@ -512,21 +605,50 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
512 */ 605 */
513static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) 606static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
514{ 607{
515 u8 i; 608 u8 i, j;
516#ifdef PM8001_READ_VPD 609#ifdef PM8001_READ_VPD
610 /* For new SPC controllers WWN is stored in flash vpd
611 * For SPC/SPCve controllers WWN is stored in EEPROM
612 * For Older SPC WWN is stored in NVMD
613 */
517 DECLARE_COMPLETION_ONSTACK(completion); 614 DECLARE_COMPLETION_ONSTACK(completion);
518 struct pm8001_ioctl_payload payload; 615 struct pm8001_ioctl_payload payload;
616 u16 deviceid;
617 pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
519 pm8001_ha->nvmd_completion = &completion; 618 pm8001_ha->nvmd_completion = &completion;
520 payload.minor_function = 0; 619
521 payload.length = 128; 620 if (pm8001_ha->chip_id == chip_8001) {
522 payload.func_specific = kzalloc(128, GFP_KERNEL); 621 if (deviceid == 0x8081) {
622 payload.minor_function = 4;
623 payload.length = 4096;
624 } else {
625 payload.minor_function = 0;
626 payload.length = 128;
627 }
628 } else {
629 payload.minor_function = 1;
630 payload.length = 4096;
631 }
632 payload.offset = 0;
633 payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
523 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); 634 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
524 wait_for_completion(&completion); 635 wait_for_completion(&completion);
636
637 for (i = 0, j = 0; i <= 7; i++, j++) {
638 if (pm8001_ha->chip_id == chip_8001) {
639 if (deviceid == 0x8081)
640 pm8001_ha->sas_addr[j] =
641 payload.func_specific[0x704 + i];
642 } else
643 pm8001_ha->sas_addr[j] =
644 payload.func_specific[0x804 + i];
645 }
646
525 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 647 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
526 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, 648 memcpy(&pm8001_ha->phy[i].dev_sas_addr,
527 SAS_ADDR_SIZE); 649 pm8001_ha->sas_addr, SAS_ADDR_SIZE);
528 PM8001_INIT_DBG(pm8001_ha, 650 PM8001_INIT_DBG(pm8001_ha,
529 pm8001_printk("phy %d sas_addr = %016llx \n", i, 651 pm8001_printk("phy %d sas_addr = %016llx\n", i,
530 pm8001_ha->phy[i].dev_sas_addr)); 652 pm8001_ha->phy[i].dev_sas_addr));
531 } 653 }
532#else 654#else
@@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
547 * @chip_info: our ha struct. 669 * @chip_info: our ha struct.
548 * @irq_handler: irq_handler 670 * @irq_handler: irq_handler
549 */ 671 */
550static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, 672static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
551 irq_handler_t irq_handler)
552{ 673{
553 u32 i = 0, j = 0; 674 u32 i = 0, j = 0;
554 u32 number_of_intr = 1; 675 u32 number_of_intr;
555 int flag = 0; 676 int flag = 0;
556 u32 max_entry; 677 u32 max_entry;
557 int rc; 678 int rc;
679 static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
680
681 /* SPCv controllers supports 64 msi-x */
682 if (pm8001_ha->chip_id == chip_8001) {
683 number_of_intr = 1;
684 flag |= IRQF_DISABLED;
685 } else {
686 number_of_intr = PM8001_MAX_MSIX_VEC;
687 flag &= ~IRQF_SHARED;
688 flag |= IRQF_DISABLED;
689 }
690
558 max_entry = sizeof(pm8001_ha->msix_entries) / 691 max_entry = sizeof(pm8001_ha->msix_entries) /
559 sizeof(pm8001_ha->msix_entries[0]); 692 sizeof(pm8001_ha->msix_entries[0]);
560 flag |= IRQF_DISABLED;
561 for (i = 0; i < max_entry ; i++) 693 for (i = 0; i < max_entry ; i++)
562 pm8001_ha->msix_entries[i].entry = i; 694 pm8001_ha->msix_entries[i].entry = i;
563 rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, 695 rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
564 number_of_intr); 696 number_of_intr);
565 pm8001_ha->number_of_intr = number_of_intr; 697 pm8001_ha->number_of_intr = number_of_intr;
566 if (!rc) { 698 if (!rc) {
699 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
700 "pci_enable_msix request ret:%d no of intr %d\n",
701 rc, pm8001_ha->number_of_intr));
702
703 for (i = 0; i < number_of_intr; i++)
704 pm8001_ha->outq[i] = i;
705
567 for (i = 0; i < number_of_intr; i++) { 706 for (i = 0; i < number_of_intr; i++) {
707 snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
708 DRV_NAME"%d", i);
568 if (request_irq(pm8001_ha->msix_entries[i].vector, 709 if (request_irq(pm8001_ha->msix_entries[i].vector,
569 irq_handler, flag, DRV_NAME, 710 pm8001_interrupt_handler_msix, flag,
570 SHOST_TO_SAS_HA(pm8001_ha->shost))) { 711 intr_drvname[i], &pm8001_ha->outq[i])) {
571 for (j = 0; j < i; j++) 712 for (j = 0; j < i; j++)
572 free_irq( 713 free_irq(
573 pm8001_ha->msix_entries[j].vector, 714 pm8001_ha->msix_entries[j].vector,
574 SHOST_TO_SAS_HA(pm8001_ha->shost)); 715 &pm8001_ha->outq[j]);
575 pci_disable_msix(pm8001_ha->pdev); 716 pci_disable_msix(pm8001_ha->pdev);
576 break; 717 break;
577 } 718 }
@@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
588static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) 729static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
589{ 730{
590 struct pci_dev *pdev; 731 struct pci_dev *pdev;
591 irq_handler_t irq_handler = pm8001_interrupt;
592 int rc; 732 int rc;
593 733
594 pdev = pm8001_ha->pdev; 734 pdev = pm8001_ha->pdev;
595 735
596#ifdef PM8001_USE_MSIX 736#ifdef PM8001_USE_MSIX
597 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) 737 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
598 return pm8001_setup_msix(pm8001_ha, irq_handler); 738 return pm8001_setup_msix(pm8001_ha);
599 else 739 else {
740 PM8001_INIT_DBG(pm8001_ha,
741 pm8001_printk("MSIX not supported!!!\n"));
600 goto intx; 742 goto intx;
743 }
601#endif 744#endif
602 745
603intx: 746intx:
604 /* initialize the INT-X interrupt */ 747 /* initialize the INT-X interrupt */
605 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, 748 rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
606 SHOST_TO_SAS_HA(pm8001_ha->shost)); 749 DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
607 return rc; 750 return rc;
608} 751}
609 752
@@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
621{ 764{
622 unsigned int rc; 765 unsigned int rc;
623 u32 pci_reg; 766 u32 pci_reg;
767 u8 i = 0;
624 struct pm8001_hba_info *pm8001_ha; 768 struct pm8001_hba_info *pm8001_ha;
625 struct Scsi_Host *shost = NULL; 769 struct Scsi_Host *shost = NULL;
626 const struct pm8001_chip_info *chip; 770 const struct pm8001_chip_info *chip;
627 771
628 dev_printk(KERN_INFO, &pdev->dev, 772 dev_printk(KERN_INFO, &pdev->dev,
629 "pm8001: driver version %s\n", DRV_VERSION); 773 "pm80xx: driver version %s\n", DRV_VERSION);
630 rc = pci_enable_device(pdev); 774 rc = pci_enable_device(pdev);
631 if (rc) 775 if (rc)
632 goto err_out_enable; 776 goto err_out_enable;
@@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
665 goto err_out_free; 809 goto err_out_free;
666 } 810 }
667 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); 811 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
668 pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); 812 /* ent->driver variable is used to differentiate between controllers */
813 pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
669 if (!pm8001_ha) { 814 if (!pm8001_ha) {
670 rc = -ENOMEM; 815 rc = -ENOMEM;
671 goto err_out_free; 816 goto err_out_free;
672 } 817 }
673 list_add_tail(&pm8001_ha->list, &hba_list); 818 list_add_tail(&pm8001_ha->list, &hba_list);
674 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 819 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
675 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); 820 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
676 if (rc) 821 if (rc) {
822 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
823 "chip_init failed [ret: %d]\n", rc));
677 goto err_out_ha_free; 824 goto err_out_ha_free;
825 }
678 826
679 rc = scsi_add_host(shost, &pdev->dev); 827 rc = scsi_add_host(shost, &pdev->dev);
680 if (rc) 828 if (rc)
681 goto err_out_ha_free; 829 goto err_out_ha_free;
682 rc = pm8001_request_irq(pm8001_ha); 830 rc = pm8001_request_irq(pm8001_ha);
683 if (rc) 831 if (rc) {
832 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
833 "pm8001_request_irq failed [ret: %d]\n", rc));
684 goto err_out_shost; 834 goto err_out_shost;
835 }
836
837 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
838 if (pm8001_ha->chip_id != chip_8001) {
839 for (i = 1; i < pm8001_ha->number_of_intr; i++)
840 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
841 /* setup thermal configuration. */
842 pm80xx_set_thermal_config(pm8001_ha);
843 }
685 844
686 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
687 pm8001_init_sas_add(pm8001_ha); 845 pm8001_init_sas_add(pm8001_ha);
688 pm8001_post_sas_ha_init(shost, chip); 846 pm8001_post_sas_ha_init(shost, chip);
689 rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); 847 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
719 sas_remove_host(pm8001_ha->shost); 877 sas_remove_host(pm8001_ha->shost);
720 list_del(&pm8001_ha->list); 878 list_del(&pm8001_ha->list);
721 scsi_remove_host(pm8001_ha->shost); 879 scsi_remove_host(pm8001_ha->shost);
722 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); 880 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
723 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 881 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
724 882
725#ifdef PM8001_USE_MSIX 883#ifdef PM8001_USE_MSIX
726 for (i = 0; i < pm8001_ha->number_of_intr; i++) 884 for (i = 0; i < pm8001_ha->number_of_intr; i++)
727 synchronize_irq(pm8001_ha->msix_entries[i].vector); 885 synchronize_irq(pm8001_ha->msix_entries[i].vector);
728 for (i = 0; i < pm8001_ha->number_of_intr; i++) 886 for (i = 0; i < pm8001_ha->number_of_intr; i++)
729 free_irq(pm8001_ha->msix_entries[i].vector, sha); 887 free_irq(pm8001_ha->msix_entries[i].vector,
888 &pm8001_ha->outq[i]);
730 pci_disable_msix(pdev); 889 pci_disable_msix(pdev);
731#else 890#else
732 free_irq(pm8001_ha->irq, sha); 891 free_irq(pm8001_ha->irq, sha);
@@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
763 printk(KERN_ERR " PCI PM not supported\n"); 922 printk(KERN_ERR " PCI PM not supported\n");
764 return -ENODEV; 923 return -ENODEV;
765 } 924 }
766 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); 925 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
767 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 926 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
768#ifdef PM8001_USE_MSIX 927#ifdef PM8001_USE_MSIX
769 for (i = 0; i < pm8001_ha->number_of_intr; i++) 928 for (i = 0; i < pm8001_ha->number_of_intr; i++)
770 synchronize_irq(pm8001_ha->msix_entries[i].vector); 929 synchronize_irq(pm8001_ha->msix_entries[i].vector);
771 for (i = 0; i < pm8001_ha->number_of_intr; i++) 930 for (i = 0; i < pm8001_ha->number_of_intr; i++)
772 free_irq(pm8001_ha->msix_entries[i].vector, sha); 931 free_irq(pm8001_ha->msix_entries[i].vector,
932 &pm8001_ha->outq[i]);
773 pci_disable_msix(pdev); 933 pci_disable_msix(pdev);
774#else 934#else
775 free_irq(pm8001_ha->irq, sha); 935 free_irq(pm8001_ha->irq, sha);
@@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
798 struct sas_ha_struct *sha = pci_get_drvdata(pdev); 958 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
799 struct pm8001_hba_info *pm8001_ha; 959 struct pm8001_hba_info *pm8001_ha;
800 int rc; 960 int rc;
961 u8 i = 0;
801 u32 device_state; 962 u32 device_state;
802 pm8001_ha = sha->lldd_ha; 963 pm8001_ha = sha->lldd_ha;
803 device_state = pdev->current_state; 964 device_state = pdev->current_state;
@@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
820 if (rc) 981 if (rc)
821 goto err_out_disable; 982 goto err_out_disable;
822 983
823 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); 984 /* chip soft rst only for spc */
985 if (pm8001_ha->chip_id == chip_8001) {
986 PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
987 PM8001_INIT_DBG(pm8001_ha,
988 pm8001_printk("chip soft reset successful\n"));
989 }
824 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); 990 rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
825 if (rc) 991 if (rc)
826 goto err_out_disable; 992 goto err_out_disable;
827 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); 993
994 /* disable all the interrupt bits */
995 PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
996
828 rc = pm8001_request_irq(pm8001_ha); 997 rc = pm8001_request_irq(pm8001_ha);
829 if (rc) 998 if (rc)
830 goto err_out_disable; 999 goto err_out_disable;
831 #ifdef PM8001_USE_TASKLET 1000#ifdef PM8001_USE_TASKLET
1001 /* default tasklet for non msi-x interrupt handler/first msi-x
1002 * interrupt handler */
832 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, 1003 tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
833 (unsigned long)pm8001_ha); 1004 (unsigned long)pm8001_ha);
834 #endif 1005#endif
835 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); 1006 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
1007 if (pm8001_ha->chip_id != chip_8001) {
1008 for (i = 1; i < pm8001_ha->number_of_intr; i++)
1009 PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
1010 }
836 scsi_unblock_requests(pm8001_ha->shost); 1011 scsi_unblock_requests(pm8001_ha->shost);
837 return 0; 1012 return 0;
838 1013
@@ -843,14 +1018,45 @@ err_out_enable:
843 return rc; 1018 return rc;
844} 1019}
845 1020
1021/* update of pci device, vendor id and driver data with
1022 * unique value for each of the controller
1023 */
846static struct pci_device_id pm8001_pci_table[] = { 1024static struct pci_device_id pm8001_pci_table[] = {
847 { 1025 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
848 PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
849 },
850 { 1026 {
851 PCI_DEVICE(0x117c, 0x0042), 1027 PCI_DEVICE(0x117c, 0x0042),
852 .driver_data = chip_8001 1028 .driver_data = chip_8001
853 }, 1029 },
1030 /* Support for SPC/SPCv/SPCve controllers */
1031 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
1032 { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
1033 { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
1034 { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
1035 { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
1036 { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
1037 { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
1038 { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
1039 { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
1040 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1041 PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
1042 { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
1043 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
1044 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1045 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
1046 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1047 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
1048 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1049 PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
1050 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1051 PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
1052 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1053 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
1054 { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
1055 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
1056 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1057 PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
1058 { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
1059 PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
854 {} /* terminate list */ 1060 {} /* terminate list */
855}; 1061};
856 1062
@@ -870,7 +1076,7 @@ static int __init pm8001_init(void)
870{ 1076{
871 int rc = -ENOMEM; 1077 int rc = -ENOMEM;
872 1078
873 pm8001_wq = alloc_workqueue("pm8001", 0, 0); 1079 pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
874 if (!pm8001_wq) 1080 if (!pm8001_wq)
875 goto err; 1081 goto err;
876 1082
@@ -902,7 +1108,8 @@ module_init(pm8001_init);
902module_exit(pm8001_exit); 1108module_exit(pm8001_exit);
903 1109
904MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); 1110MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
905MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); 1111MODULE_DESCRIPTION(
1112 "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
906MODULE_VERSION(DRV_VERSION); 1113MODULE_VERSION(DRV_VERSION);
907MODULE_LICENSE("GPL"); 1114MODULE_LICENSE("GPL");
908MODULE_DEVICE_TABLE(pci, pm8001_pci_table); 1115MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b961112395d5..a85d73de7c80 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
68 clear_bit(tag, bitmap); 68 clear_bit(tag, bitmap);
69} 69}
70 70
71static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) 71void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
72{ 72{
73 pm8001_tag_clear(pm8001_ha, tag); 73 pm8001_tag_clear(pm8001_ha, tag);
74} 74}
@@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
212 break; 212 break;
213 case PHY_FUNC_GET_EVENTS: 213 case PHY_FUNC_GET_EVENTS:
214 spin_lock_irqsave(&pm8001_ha->lock, flags); 214 spin_lock_irqsave(&pm8001_ha->lock, flags);
215 if (-1 == pm8001_bar4_shift(pm8001_ha, 215 if (pm8001_ha->chip_id == chip_8001) {
216 if (-1 == pm8001_bar4_shift(pm8001_ha,
216 (phy_id < 4) ? 0x30000 : 0x40000)) { 217 (phy_id < 4) ? 0x30000 : 0x40000)) {
217 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 218 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
218 return -EINVAL; 219 return -EINVAL;
220 }
219 } 221 }
220 { 222 {
221 struct sas_phy *phy = sas_phy->phy; 223 struct sas_phy *phy = sas_phy->phy;
@@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
228 phy->loss_of_dword_sync_count = qp[3]; 230 phy->loss_of_dword_sync_count = qp[3];
229 phy->phy_reset_problem_count = qp[4]; 231 phy->phy_reset_problem_count = qp[4];
230 } 232 }
231 pm8001_bar4_shift(pm8001_ha, 0); 233 if (pm8001_ha->chip_id == chip_8001)
234 pm8001_bar4_shift(pm8001_ha, 0);
232 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 235 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
233 return 0; 236 return 0;
234 default: 237 default:
@@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost)
249 struct pm8001_hba_info *pm8001_ha; 252 struct pm8001_hba_info *pm8001_ha;
250 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 253 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
251 pm8001_ha = sha->lldd_ha; 254 pm8001_ha = sha->lldd_ha;
252 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); 255 /* SAS_RE_INITIALIZATION not available in SPCv/ve */
256 if (pm8001_ha->chip_id == chip_8001)
257 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
253 for (i = 0; i < pm8001_ha->chip->n_phy; ++i) 258 for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
254 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); 259 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
255} 260}
@@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
352 * @tmf: the task management IU 357 * @tmf: the task management IU
353 */ 358 */
354#define DEV_IS_GONE(pm8001_dev) \ 359#define DEV_IS_GONE(pm8001_dev) \
355 ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) 360 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
356static int pm8001_task_exec(struct sas_task *task, const int num, 361static int pm8001_task_exec(struct sas_task *task, const int num,
357 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) 362 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
358{ 363{
@@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
370 struct task_status_struct *tsm = &t->task_status; 375 struct task_status_struct *tsm = &t->task_status;
371 tsm->resp = SAS_TASK_UNDELIVERED; 376 tsm->resp = SAS_TASK_UNDELIVERED;
372 tsm->stat = SAS_PHY_DOWN; 377 tsm->stat = SAS_PHY_DOWN;
373 if (dev->dev_type != SATA_DEV) 378 if (dev->dev_type != SAS_SATA_DEV)
374 t->task_done(t); 379 t->task_done(t);
375 return 0; 380 return 0;
376 } 381 }
@@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
548{ 553{
549 u32 dev; 554 u32 dev;
550 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { 555 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
551 if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { 556 if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
552 pm8001_ha->devices[dev].id = dev; 557 pm8001_ha->devices[dev].id = dev;
553 return &pm8001_ha->devices[dev]; 558 return &pm8001_ha->devices[dev];
554 } 559 }
@@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
560 } 565 }
561 return NULL; 566 return NULL;
562} 567}
568/**
569 * pm8001_find_dev - find a matching pm8001_device
570 * @pm8001_ha: our hba card information
571 */
572struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
573 u32 device_id)
574{
575 u32 dev;
576 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
577 if (pm8001_ha->devices[dev].device_id == device_id)
578 return &pm8001_ha->devices[dev];
579 }
580 if (dev == PM8001_MAX_DEVICES) {
581 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
582 "DEVICE FOUND !!!\n"));
583 }
584 return NULL;
585}
563 586
564static void pm8001_free_dev(struct pm8001_device *pm8001_dev) 587static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
565{ 588{
566 u32 id = pm8001_dev->id; 589 u32 id = pm8001_dev->id;
567 memset(pm8001_dev, 0, sizeof(*pm8001_dev)); 590 memset(pm8001_dev, 0, sizeof(*pm8001_dev));
568 pm8001_dev->id = id; 591 pm8001_dev->id = id;
569 pm8001_dev->dev_type = NO_DEVICE; 592 pm8001_dev->dev_type = SAS_PHY_UNUSED;
570 pm8001_dev->device_id = PM8001_MAX_DEVICES; 593 pm8001_dev->device_id = PM8001_MAX_DEVICES;
571 pm8001_dev->sas_device = NULL; 594 pm8001_dev->sas_device = NULL;
572} 595}
@@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
624 res = -1; 647 res = -1;
625 } 648 }
626 } else { 649 } else {
627 if (dev->dev_type == SATA_DEV) { 650 if (dev->dev_type == SAS_SATA_DEV) {
628 pm8001_device->attached_phy = 651 pm8001_device->attached_phy =
629 dev->rphy->identify.phy_identifier; 652 dev->rphy->identify.phy_identifier;
630 flag = 1; /* directly sata*/ 653 flag = 1; /* directly sata*/
@@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
634 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); 657 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
635 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 658 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
636 wait_for_completion(&completion); 659 wait_for_completion(&completion);
637 if (dev->dev_type == SAS_END_DEV) 660 if (dev->dev_type == SAS_END_DEVICE)
638 msleep(50); 661 msleep(50);
639 pm8001_ha->flags = PM8001F_RUN_TIME; 662 pm8001_ha->flags = PM8001F_RUN_TIME;
640 return 0; 663 return 0;
@@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev)
648 return pm8001_dev_found_notify(dev); 671 return pm8001_dev_found_notify(dev);
649} 672}
650 673
651static void pm8001_task_done(struct sas_task *task) 674void pm8001_task_done(struct sas_task *task)
652{ 675{
653 if (!del_timer(&task->slow_task->timer)) 676 if (!del_timer(&task->slow_task->timer))
654 return; 677 return;
@@ -904,7 +927,7 @@ void pm8001_open_reject_retry(
904 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; 927 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
905 928
906 pm8001_dev = ccb->device; 929 pm8001_dev = ccb->device;
907 if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) 930 if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
908 continue; 931 continue;
909 if (!device_to_close) { 932 if (!device_to_close) {
910 uintptr_t d = (uintptr_t)pm8001_dev 933 uintptr_t d = (uintptr_t)pm8001_dev
@@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
995 return rc; 1018 return rc;
996} 1019}
997 1020
1021/*
1022* This function handle the IT_NEXUS_XXX event or completion
1023* status code for SSP/SATA/SMP I/O request.
1024*/
1025int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
1026{
1027 int rc = TMF_RESP_FUNC_FAILED;
1028 struct pm8001_device *pm8001_dev;
1029 struct pm8001_hba_info *pm8001_ha;
1030 struct sas_phy *phy;
1031 u32 device_id = 0;
1032
1033 if (!dev || !dev->lldd_dev)
1034 return -1;
1035
1036 pm8001_dev = dev->lldd_dev;
1037 device_id = pm8001_dev->device_id;
1038 pm8001_ha = pm8001_find_ha_by_dev(dev);
1039
1040 PM8001_EH_DBG(pm8001_ha,
1041 pm8001_printk("I_T_Nexus handler invoked !!"));
1042
1043 phy = sas_get_local_phy(dev);
1044
1045 if (dev_is_sata(dev)) {
1046 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1047 if (scsi_is_sas_phy_local(phy)) {
1048 rc = 0;
1049 goto out;
1050 }
1051 /* send internal ssp/sata/smp abort command to FW */
1052 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1053 dev, 1, 0);
1054 msleep(100);
1055
1056 /* deregister the target device */
1057 pm8001_dev_gone_notify(dev);
1058 msleep(200);
1059
1060 /*send phy reset to hard reset target */
1061 rc = sas_phy_reset(phy, 1);
1062 msleep(2000);
1063 pm8001_dev->setds_completion = &completion_setstate;
1064
1065 wait_for_completion(&completion_setstate);
1066 } else {
1067 /* send internal ssp/sata/smp abort command to FW */
1068 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1069 dev, 1, 0);
1070 msleep(100);
1071
1072 /* deregister the target device */
1073 pm8001_dev_gone_notify(dev);
1074 msleep(200);
1075
1076 /*send phy reset to hard reset target */
1077 rc = sas_phy_reset(phy, 1);
1078 msleep(2000);
1079 }
1080 PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
1081 pm8001_dev->device_id, rc));
1082out:
1083 sas_put_local_phy(phy);
1084
1085 return rc;
1086}
998/* mandatory SAM-3, the task reset the specified LUN*/ 1087/* mandatory SAM-3, the task reset the specified LUN*/
999int pm8001_lu_reset(struct domain_device *dev, u8 *lun) 1088int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
1000{ 1089{
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 11008205aeb3..570819464d90 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver 2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 * 3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd. 4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved. 5 * All rights reserved.
@@ -57,8 +57,8 @@
57#include <linux/atomic.h> 57#include <linux/atomic.h>
58#include "pm8001_defs.h" 58#include "pm8001_defs.h"
59 59
60#define DRV_NAME "pm8001" 60#define DRV_NAME "pm80xx"
61#define DRV_VERSION "0.1.36" 61#define DRV_VERSION "0.1.37"
62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ 62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ 63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ 64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -66,8 +66,8 @@
66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ 66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ 67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ 68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ 69#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
70 __func__, __LINE__, ## arg) 70 format, __func__, __LINE__, ## arg)
71#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ 71#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
72do { \ 72do { \
73 if (unlikely(HBA->logging_level & LEVEL)) \ 73 if (unlikely(HBA->logging_level & LEVEL)) \
@@ -103,11 +103,12 @@ do { \
103#define PM8001_READ_VPD 103#define PM8001_READ_VPD
104 104
105 105
106#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) 106#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
107 107
108#define PM8001_NAME_LENGTH 32/* generic length of strings */ 108#define PM8001_NAME_LENGTH 32/* generic length of strings */
109extern struct list_head hba_list; 109extern struct list_head hba_list;
110extern const struct pm8001_dispatch pm8001_8001_dispatch; 110extern const struct pm8001_dispatch pm8001_8001_dispatch;
111extern const struct pm8001_dispatch pm8001_80xx_dispatch;
111 112
112struct pm8001_hba_info; 113struct pm8001_hba_info;
113struct pm8001_ccb_info; 114struct pm8001_ccb_info;
@@ -131,15 +132,15 @@ struct pm8001_ioctl_payload {
131struct pm8001_dispatch { 132struct pm8001_dispatch {
132 char *name; 133 char *name;
133 int (*chip_init)(struct pm8001_hba_info *pm8001_ha); 134 int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
134 int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); 135 int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
135 void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); 136 void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
136 int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); 137 int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
137 void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); 138 void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
138 irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); 139 irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
139 u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); 140 u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
140 int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); 141 int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
141 void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); 142 void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
142 void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); 143 void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
143 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); 144 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
144 int (*smp_req)(struct pm8001_hba_info *pm8001_ha, 145 int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
145 struct pm8001_ccb_info *ccb); 146 struct pm8001_ccb_info *ccb);
@@ -173,6 +174,7 @@ struct pm8001_dispatch {
173}; 174};
174 175
175struct pm8001_chip_info { 176struct pm8001_chip_info {
177 u32 encrypt;
176 u32 n_phy; 178 u32 n_phy;
177 const struct pm8001_dispatch *dispatch; 179 const struct pm8001_dispatch *dispatch;
178}; 180};
@@ -204,7 +206,7 @@ struct pm8001_phy {
204}; 206};
205 207
206struct pm8001_device { 208struct pm8001_device {
207 enum sas_dev_type dev_type; 209 enum sas_device_type dev_type;
208 struct domain_device *sas_device; 210 struct domain_device *sas_device;
209 u32 attached_phy; 211 u32 attached_phy;
210 u32 id; 212 u32 id;
@@ -256,7 +258,20 @@ struct mpi_mem_req {
256 struct mpi_mem region[USI_MAX_MEMCNT]; 258 struct mpi_mem region[USI_MAX_MEMCNT];
257}; 259};
258 260
259struct main_cfg_table { 261struct encrypt {
262 u32 cipher_mode;
263 u32 sec_mode;
264 u32 status;
265 u32 flag;
266};
267
268struct sas_phy_attribute_table {
269 u32 phystart1_16[16];
270 u32 outbound_hw_event_pid1_16[16];
271};
272
273union main_cfg_table {
274 struct {
260 u32 signature; 275 u32 signature;
261 u32 interface_rev; 276 u32 interface_rev;
262 u32 firmware_rev; 277 u32 firmware_rev;
@@ -292,19 +307,69 @@ struct main_cfg_table {
292 u32 fatal_err_dump_length1; 307 u32 fatal_err_dump_length1;
293 u32 hda_mode_flag; 308 u32 hda_mode_flag;
294 u32 anolog_setup_table_offset; 309 u32 anolog_setup_table_offset;
310 u32 rsvd[4];
311 } pm8001_tbl;
312
313 struct {
314 u32 signature;
315 u32 interface_rev;
316 u32 firmware_rev;
317 u32 max_out_io;
318 u32 max_sgl;
319 u32 ctrl_cap_flag;
320 u32 gst_offset;
321 u32 inbound_queue_offset;
322 u32 outbound_queue_offset;
323 u32 inbound_q_nppd_hppd;
324 u32 rsvd[8];
325 u32 crc_core_dump;
326 u32 rsvd1;
327 u32 upper_event_log_addr;
328 u32 lower_event_log_addr;
329 u32 event_log_size;
330 u32 event_log_severity;
331 u32 upper_pcs_event_log_addr;
332 u32 lower_pcs_event_log_addr;
333 u32 pcs_event_log_size;
334 u32 pcs_event_log_severity;
335 u32 fatal_err_interrupt;
336 u32 fatal_err_dump_offset0;
337 u32 fatal_err_dump_length0;
338 u32 fatal_err_dump_offset1;
339 u32 fatal_err_dump_length1;
340 u32 gpio_led_mapping;
341 u32 analog_setup_table_offset;
342 u32 int_vec_table_offset;
343 u32 phy_attr_table_offset;
344 u32 port_recovery_timer;
345 u32 interrupt_reassertion_delay;
346 } pm80xx_tbl;
295}; 347};
296struct general_status_table { 348
349union general_status_table {
350 struct {
297 u32 gst_len_mpistate; 351 u32 gst_len_mpistate;
298 u32 iq_freeze_state0; 352 u32 iq_freeze_state0;
299 u32 iq_freeze_state1; 353 u32 iq_freeze_state1;
300 u32 msgu_tcnt; 354 u32 msgu_tcnt;
301 u32 iop_tcnt; 355 u32 iop_tcnt;
302 u32 reserved; 356 u32 rsvd;
303 u32 phy_state[8]; 357 u32 phy_state[8];
304 u32 reserved1; 358 u32 gpio_input_val;
305 u32 reserved2; 359 u32 rsvd1[2];
306 u32 reserved3; 360 u32 recover_err_info[8];
361 } pm8001_tbl;
362 struct {
363 u32 gst_len_mpistate;
364 u32 iq_freeze_state0;
365 u32 iq_freeze_state1;
366 u32 msgu_tcnt;
367 u32 iop_tcnt;
368 u32 rsvd[9];
369 u32 gpio_input_val;
370 u32 rsvd1[2];
307 u32 recover_err_info[8]; 371 u32 recover_err_info[8];
372 } pm80xx_tbl;
308}; 373};
309struct inbound_queue_table { 374struct inbound_queue_table {
310 u32 element_pri_size_cnt; 375 u32 element_pri_size_cnt;
@@ -351,15 +416,21 @@ struct pm8001_hba_info {
351 struct device *dev; 416 struct device *dev;
352 struct pm8001_hba_memspace io_mem[6]; 417 struct pm8001_hba_memspace io_mem[6];
353 struct mpi_mem_req memoryMap; 418 struct mpi_mem_req memoryMap;
419 struct encrypt encrypt_info; /* support encryption */
354 void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ 420 void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
355 void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ 421 void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
356 void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ 422 void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
357 void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ 423 void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
358 void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ 424 void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
359 struct main_cfg_table main_cfg_tbl; 425 void __iomem *pspa_q_tbl_addr;
360 struct general_status_table gs_tbl; 426 /*MPI SAS PHY attributes Queue Config Table Addr*/
361 struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; 427 void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */
362 struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; 428 union main_cfg_table main_cfg_tbl;
429 union general_status_table gs_tbl;
430 struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
431 struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
432 struct sas_phy_attribute_table phy_attr_table;
433 /* MPI SAS PHY attributes */
363 u8 sas_addr[SAS_ADDR_SIZE]; 434 u8 sas_addr[SAS_ADDR_SIZE];
364 struct sas_ha_struct *sas;/* SCSI/SAS glue */ 435 struct sas_ha_struct *sas;/* SCSI/SAS glue */
365 struct Scsi_Host *shost; 436 struct Scsi_Host *shost;
@@ -372,10 +443,12 @@ struct pm8001_hba_info {
372 struct pm8001_port port[PM8001_MAX_PHYS]; 443 struct pm8001_port port[PM8001_MAX_PHYS];
373 u32 id; 444 u32 id;
374 u32 irq; 445 u32 irq;
446 u32 iomb_size; /* SPC and SPCV IOMB size */
375 struct pm8001_device *devices; 447 struct pm8001_device *devices;
376 struct pm8001_ccb_info *ccb_info; 448 struct pm8001_ccb_info *ccb_info;
377#ifdef PM8001_USE_MSIX 449#ifdef PM8001_USE_MSIX
378 struct msix_entry msix_entries[16];/*for msi-x interrupt*/ 450 struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC];
451 /*for msi-x interrupt*/
379 int number_of_intr;/*will be used in remove()*/ 452 int number_of_intr;/*will be used in remove()*/
380#endif 453#endif
381#ifdef PM8001_USE_TASKLET 454#ifdef PM8001_USE_TASKLET
@@ -383,7 +456,10 @@ struct pm8001_hba_info {
383#endif 456#endif
384 u32 logging_level; 457 u32 logging_level;
385 u32 fw_status; 458 u32 fw_status;
459 u32 smp_exp_mode;
460 u32 int_vector;
386 const struct firmware *fw_image; 461 const struct firmware *fw_image;
462 u8 outq[PM8001_MAX_MSIX_VEC];
387}; 463};
388 464
389struct pm8001_work { 465struct pm8001_work {
@@ -419,6 +495,9 @@ struct pm8001_fw_image_header {
419#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 495#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10
420#define FLASH_UPDATE_DISABLED 0x11 496#define FLASH_UPDATE_DISABLED 0x11
421 497
498#define NCQ_READ_LOG_FLAG 0x80000000
499#define NCQ_ABORT_ALL_FLAG 0x40000000
500#define NCQ_2ND_RLE_FLAG 0x20000000
422/** 501/**
423 * brief param structure for firmware flash update. 502 * brief param structure for firmware flash update.
424 */ 503 */
@@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev);
484void pm8001_dev_gone(struct domain_device *dev); 563void pm8001_dev_gone(struct domain_device *dev);
485int pm8001_lu_reset(struct domain_device *dev, u8 *lun); 564int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
486int pm8001_I_T_nexus_reset(struct domain_device *dev); 565int pm8001_I_T_nexus_reset(struct domain_device *dev);
566int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
487int pm8001_query_task(struct sas_task *task); 567int pm8001_query_task(struct sas_task *task);
488void pm8001_open_reject_retry( 568void pm8001_open_reject_retry(
489 struct pm8001_hba_info *pm8001_ha, 569 struct pm8001_hba_info *pm8001_ha,
@@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
493 dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, 573 dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
494 u32 mem_size, u32 align); 574 u32 mem_size, u32 align);
495 575
576void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
577int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
578 struct inbound_queue_table *circularQ,
579 u32 opCode, void *payload, u32 responseQueue);
580int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
581 u16 messageSize, void **messagePtr);
582u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
583 struct outbound_queue_table *circularQ, u8 bc);
584u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
585 struct outbound_queue_table *circularQ,
586 void **messagePtr1, u8 *pBC);
587int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
588 struct pm8001_device *pm8001_dev, u32 state);
589int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
590 void *payload);
591int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
592 void *fw_flash_updata_info, u32 tag);
593int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
594int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
595int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
596 struct pm8001_ccb_info *ccb,
597 struct pm8001_tmf_task *tmf);
598int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
599 struct pm8001_device *pm8001_dev,
600 u8 flag, u32 task_tag, u32 cmd_tag);
601int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
602void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
603void pm8001_work_fn(struct work_struct *work);
604int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
605 void *data, int handler);
606void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
607 void *piomb);
608void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
609 void *piomb);
610void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
611 void *piomb);
612int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
613 void *piomb);
614void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
615void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
616void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
617int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
618int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
619int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
620 void *piomb);
621int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
622int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
623struct sas_task *pm8001_alloc_task(void);
624void pm8001_task_done(struct sas_task *task);
625void pm8001_free_task(struct sas_task *task);
626void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
627struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
628 u32 device_id);
629int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
630
496int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); 631int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
497 632
498/* ctl shared API */ 633/* ctl shared API */
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644
index 000000000000..302514d8157b
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -0,0 +1,4130 @@
1/*
2 * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40 #include <linux/slab.h>
41 #include "pm8001_sas.h"
42 #include "pm80xx_hwi.h"
43 #include "pm8001_chips.h"
44 #include "pm8001_ctl.h"
45
46#define SMP_DIRECT 1
47#define SMP_INDIRECT 2
48/**
49 * read_main_config_table - read the configure table and save it.
50 * @pm8001_ha: our hba card information
51 */
static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

	/* Identification / capability words at the head of the table */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature =
		pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
		pm8001_mr32(address, MAIN_INTERFACE_REVISION);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
		pm8001_mr32(address, MAIN_FW_REVISION);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io =
		pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl =
		pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
		pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
	/* Offsets (relative to the BAR) of the other MPI tables */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset =
		pm8001_mr32(address, MAIN_GST_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
		pm8001_mr32(address, MAIN_IBQ_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
		pm8001_mr32(address, MAIN_OBQ_OFFSET);

	/* read Error Dump Offset and Length */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);

	/* read GPIO LED settings from the configuration table */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
		pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);

	/* read analog Setting offset from the configuration table */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);

	/* SPCv-only tables: interrupt vector table and PHY attribute table */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
		pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
		pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
}
98
99/**
100 * read_general_status_table - read the general status table and save it.
101 * @pm8001_ha: our hba card information
102 */
static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->general_stat_tbl_addr;

	/* Table length + MPI state word (low 16 bits hold the MPI state) */
	pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate =
		pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
	/* Inbound queue freeze bitmaps (queues 0-31 / 32-63) */
	pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 =
		pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 =
		pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
	/* MSGU / IOP heartbeat tick counters */
	pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt =
		pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt =
		pm8001_mr32(address, GST_IOPTCNT_OFFSET);
	pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val =
		pm8001_mr32(address, GST_GPIO_INPUT_VAL);
	/* Recoverable-error information words 0..7 */
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET0);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET1);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET2);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET3);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET4);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET5);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET6);
	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
		pm8001_mr32(address, GST_RERRINFO_OFFSET7);
}
135/**
136 * read_phy_attr_table - read the phy attribute table and save it.
137 * @pm8001_ha: our hba card information
138 */
static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->pspa_q_tbl_addr;

	/*
	 * Per-PHY start state for PHYs 0-15.  Each register is read through
	 * its own PSPA_PHYSTATEn_OFFSET macro; the macros are kept explicit
	 * rather than looped because the register spacing is defined by the
	 * header, not assumed here.
	 */
	pm8001_ha->phy_attr_table.phystart1_16[0] =
		pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[1] =
		pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[2] =
		pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[3] =
		pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[4] =
		pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[5] =
		pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[6] =
		pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[7] =
		pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[8] =
		pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[9] =
		pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[10] =
		pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[11] =
		pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[12] =
		pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[13] =
		pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[14] =
		pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
	pm8001_ha->phy_attr_table.phystart1_16[15] =
		pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);

	/* Outbound queue ID used for hardware events of each PHY 0-15 */
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
		pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);

}
209
210/**
211 * read_inbnd_queue_table - read the inbound queue table and save it.
212 * @pm8001_ha: our hba card information
213 */
214static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
215{
216 int i;
217 void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
218 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
219 u32 offset = i * 0x20;
220 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
221 get_pci_bar_index(pm8001_mr32(address,
222 (offset + IB_PIPCI_BAR)));
223 pm8001_ha->inbnd_q_tbl[i].pi_offset =
224 pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
225 }
226}
227
228/**
229 * read_outbnd_queue_table - read the outbound queue table and save it.
230 * @pm8001_ha: our hba card information
231 */
232static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
233{
234 int i;
235 void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
236 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
237 u32 offset = i * 0x24;
238 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
239 get_pci_bar_index(pm8001_mr32(address,
240 (offset + OB_CIPCI_BAR)));
241 pm8001_ha->outbnd_q_tbl[i].ci_offset =
242 pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
243 }
244}
245
246/**
247 * init_default_table_values - init the default table.
248 * @pm8001_ha: our hba card information
249 */
250static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
251{
252 int i;
253 u32 offsetib, offsetob;
254 void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
255 void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
256
257 pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr =
258 pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
259 pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr =
260 pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
261 pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size =
262 PM8001_EVENT_LOG_SIZE;
263 pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01;
264 pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr =
265 pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
266 pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr =
267 pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
268 pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size =
269 PM8001_EVENT_LOG_SIZE;
270 pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
271 pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
272
273 /* Disable end to end CRC checking */
274 pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
275
276 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
277 pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
278 PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
279 pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
280 pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
281 pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
282 pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
283 pm8001_ha->inbnd_q_tbl[i].base_virt =
284 (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
285 pm8001_ha->inbnd_q_tbl[i].total_length =
286 pm8001_ha->memoryMap.region[IB + i].total_len;
287 pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
288 pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
289 pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
290 pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
291 pm8001_ha->inbnd_q_tbl[i].ci_virt =
292 pm8001_ha->memoryMap.region[CI + i].virt_ptr;
293 offsetib = i * 0x20;
294 pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
295 get_pci_bar_index(pm8001_mr32(addressib,
296 (offsetib + 0x14)));
297 pm8001_ha->inbnd_q_tbl[i].pi_offset =
298 pm8001_mr32(addressib, (offsetib + 0x18));
299 pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
300 pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
301 }
302 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
303 pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
304 PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
305 pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
306 pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
307 pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
308 pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
309 pm8001_ha->outbnd_q_tbl[i].base_virt =
310 (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
311 pm8001_ha->outbnd_q_tbl[i].total_length =
312 pm8001_ha->memoryMap.region[OB + i].total_len;
313 pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
314 pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
315 pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
316 pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
317 /* interrupt vector based on oq */
318 pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
319 pm8001_ha->outbnd_q_tbl[i].pi_virt =
320 pm8001_ha->memoryMap.region[PI + i].virt_ptr;
321 offsetob = i * 0x24;
322 pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
323 get_pci_bar_index(pm8001_mr32(addressob,
324 offsetob + 0x14));
325 pm8001_ha->outbnd_q_tbl[i].ci_offset =
326 pm8001_mr32(addressob, (offsetob + 0x18));
327 pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
328 pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
329 }
330}
331
332/**
333 * update_main_config_table - update the main default table to the HBA.
334 * @pm8001_ha: our hba card information
335 */
static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

	/* Push the host-side defaults into the firmware's main config table */
	pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
	pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
	pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
	pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
	pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
	pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
	pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);

	/* SPCv specific */
	/* Clear bits 28-29 of the GPIO LED field before setting the mode */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
	/* Set GPIOLED to 0x2 for LED indicator */
	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
	pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);

	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
	pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
		pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
}
374
375/**
376 * update_inbnd_queue_table - update the inbound queue table to the HBA.
377 * @pm8001_ha: our hba card information
378 */
static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
					 int number)
{
	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
	u16 offset = number * 0x20;	/* 0x20-byte descriptor per queue */

	/* Program one inbound queue descriptor into the firmware table */
	pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
		pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
	pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
		pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
		pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
	pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}
395
396/**
397 * update_outbnd_queue_table - update the outbound queue table to the HBA.
398 * @pm8001_ha: our hba card information
399 */
static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
						 int number)
{
	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
	u16 offset = number * 0x24;	/* 0x24-byte descriptor per queue */

	/* Program one outbound queue descriptor into the firmware table */
	pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
		pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
	pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
		pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
	pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
		pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
	pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
		pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
	pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
	/* interrupt vector / coalescing configuration for this queue */
	pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
		pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
}
418
419/**
420 * mpi_init_check - check firmware initialization status.
421 * @pm8001_ha: our hba card information
422 */
423static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
424{
425 u32 max_wait_count;
426 u32 value;
427 u32 gst_len_mpistate;
428
429 /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
430 table is updated */
431 pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
432 /* wait until Inbound DoorBell Clear Register toggled */
433 max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
434 do {
435 udelay(1);
436 value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
437 value &= SPCv_MSGU_CFG_TABLE_UPDATE;
438 } while ((value != 0) && (--max_wait_count));
439
440 if (!max_wait_count)
441 return -1;
442 /* check the MPI-State for initialization upto 100ms*/
443 max_wait_count = 100 * 1000;/* 100 msec */
444 do {
445 udelay(1);
446 gst_len_mpistate =
447 pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
448 GST_GSTLEN_MPIS_OFFSET);
449 } while ((GST_MPI_STATE_INIT !=
450 (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
451 if (!max_wait_count)
452 return -1;
453
454 /* check MPI Initialization error */
455 gst_len_mpistate = gst_len_mpistate >> 16;
456 if (0x0000 != gst_len_mpistate)
457 return -1;
458
459 return 0;
460}
461
462/**
463 * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
464 * @pm8001_ha: our hba card information
465 */
466static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
467{
468 u32 value;
469 u32 max_wait_count;
470 u32 max_wait_time;
471 int ret = 0;
472
473 /* reset / PCIe ready */
474 max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */
475 do {
476 udelay(1);
477 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
478 } while ((value == 0xFFFFFFFF) && (--max_wait_count));
479
480 /* check ila status */
481 max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */
482 do {
483 udelay(1);
484 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
485 } while (((value & SCRATCH_PAD_ILA_READY) !=
486 SCRATCH_PAD_ILA_READY) && (--max_wait_count));
487 if (!max_wait_count)
488 ret = -1;
489 else {
490 PM8001_MSG_DBG(pm8001_ha,
491 pm8001_printk(" ila ready status in %d millisec\n",
492 (max_wait_time - max_wait_count)));
493 }
494
495 /* check RAAE status */
496 max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */
497 do {
498 udelay(1);
499 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
500 } while (((value & SCRATCH_PAD_RAAE_READY) !=
501 SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
502 if (!max_wait_count)
503 ret = -1;
504 else {
505 PM8001_MSG_DBG(pm8001_ha,
506 pm8001_printk(" raae ready status in %d millisec\n",
507 (max_wait_time - max_wait_count)));
508 }
509
510 /* check iop0 status */
511 max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */
512 do {
513 udelay(1);
514 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
515 } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
516 (--max_wait_count));
517 if (!max_wait_count)
518 ret = -1;
519 else {
520 PM8001_MSG_DBG(pm8001_ha,
521 pm8001_printk(" iop0 ready status in %d millisec\n",
522 (max_wait_time - max_wait_count)));
523 }
524
525 /* check iop1 status only for 16 port controllers */
526 if ((pm8001_ha->chip_id != chip_8008) &&
527 (pm8001_ha->chip_id != chip_8009)) {
528 /* 200 milli sec */
529 max_wait_time = max_wait_count = 200 * 1000;
530 do {
531 udelay(1);
532 value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
533 } while (((value & SCRATCH_PAD_IOP1_READY) !=
534 SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
535 if (!max_wait_count)
536 ret = -1;
537 else {
538 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
539 "iop1 ready status in %d millisec\n",
540 (max_wait_time - max_wait_count)));
541 }
542 }
543
544 return ret;
545}
546
/* Resolve the BAR-relative addresses of all MPI tables from scratchpad 0. */
static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
	void __iomem *base_addr;
	u32	value;
	u32	offset;
	u32	pcibar;
	u32	pcilogic;

	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
	offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
				offset, value));
	/* Top 6 bits hold the logical PCI BAR of the main config table */
	pcilogic = (value & 0xFC000000) >> 26;
	pcibar = get_pci_bar_index(pcilogic);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
	pm8001_ha->main_cfg_tbl_addr = base_addr =
		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
	/*
	 * The main config table stores 24-bit offsets to the other tables.
	 * Offsets 0x18/0x1C/0x20/0x8C/0x90 correspond to GST, inbound queue,
	 * outbound queue, interrupt vector and PHY attribute tables
	 * (per the debug prints below).
	 */
	pm8001_ha->general_stat_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
					0xFFFFFF);
	pm8001_ha->inbnd_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
					0xFFFFFF);
	pm8001_ha->outbnd_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
					0xFFFFFF);
	pm8001_ha->ivt_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
					0xFFFFFF);
	pm8001_ha->pspa_q_tbl_addr =
		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
					0xFFFFFF);

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GST OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("INBND OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("OBND OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("IVT OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PSPA OFFSET 0x%x\n",
			pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("addr - main cfg %p general status %p\n",
			pm8001_ha->main_cfg_tbl_addr,
			pm8001_ha->general_stat_tbl_addr));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("addr - inbnd %p obnd %p\n",
			pm8001_ha->inbnd_q_tbl_addr,
			pm8001_ha->outbnd_q_tbl_addr));
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("addr - pspa %p ivt %p\n",
			pm8001_ha->pspa_q_tbl_addr,
			pm8001_ha->ivt_tbl_addr));
}
611
612/**
613 * pm80xx_set_thermal_config - support the thermal configuration
614 * @pm8001_ha: our hba card information.
615 */
616int
617pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
618{
619 struct set_ctrl_cfg_req payload;
620 struct inbound_queue_table *circularQ;
621 int rc;
622 u32 tag;
623 u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
624
625 memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
626 rc = pm8001_tag_alloc(pm8001_ha, &tag);
627 if (rc)
628 return -1;
629
630 circularQ = &pm8001_ha->inbnd_q_tbl[0];
631 payload.tag = cpu_to_le32(tag);
632 payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
633 (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
634 payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
635
636 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
637 return rc;
638
639}
640
641/**
642* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
643* Timer configuration page
644* @pm8001_ha: our hba card information.
645*/
646static int
647pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
648{
649 struct set_ctrl_cfg_req payload;
650 struct inbound_queue_table *circularQ;
651 SASProtocolTimerConfig_t SASConfigPage;
652 int rc;
653 u32 tag;
654 u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
655
656 memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
657 memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
658
659 rc = pm8001_tag_alloc(pm8001_ha, &tag);
660
661 if (rc)
662 return -1;
663
664 circularQ = &pm8001_ha->inbnd_q_tbl[0];
665 payload.tag = cpu_to_le32(tag);
666
667 SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
668 SASConfigPage.MST_MSI = 3 << 15;
669 SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
670 SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
671 (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
672 SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
673
674 if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
675 SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
676
677
678 SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
679 SAS_OPNRJT_RTRY_INTVL;
680 SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
681 | SAS_COPNRJT_RTRY_TMO;
682 SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
683 | SAS_COPNRJT_RTRY_THR;
684 SASConfigPage.MAX_AIP = SAS_MAX_AIP;
685
686 PM8001_INIT_DBG(pm8001_ha,
687 pm8001_printk("SASConfigPage.pageCode "
688 "0x%08x\n", SASConfigPage.pageCode));
689 PM8001_INIT_DBG(pm8001_ha,
690 pm8001_printk("SASConfigPage.MST_MSI "
691 " 0x%08x\n", SASConfigPage.MST_MSI));
692 PM8001_INIT_DBG(pm8001_ha,
693 pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
694 " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
695 PM8001_INIT_DBG(pm8001_ha,
696 pm8001_printk("SASConfigPage.STP_FRM_TMO "
697 " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
698 PM8001_INIT_DBG(pm8001_ha,
699 pm8001_printk("SASConfigPage.STP_IDLE_TMO "
700 " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
701 PM8001_INIT_DBG(pm8001_ha,
702 pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
703 " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
704 PM8001_INIT_DBG(pm8001_ha,
705 pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
706 " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
707 PM8001_INIT_DBG(pm8001_ha,
708 pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
709 " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
710 PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
711 " 0x%08x\n", SASConfigPage.MAX_AIP));
712
713 memcpy(&payload.cfg_pg, &SASConfigPage,
714 sizeof(SASProtocolTimerConfig_t));
715
716 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
717
718 return rc;
719}
720
721/**
722 * pm80xx_get_encrypt_info - Check for encryption
723 * @pm8001_ha: our hba card information.
724 */
725static int
726pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
727{
728 u32 scratch3_value;
729 int ret;
730
731 /* Read encryption status from SCRATCH PAD 3 */
732 scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
733
734 if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
735 SCRATCH_PAD3_ENC_READY) {
736 if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
737 pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
738 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
739 SCRATCH_PAD3_SMF_ENABLED)
740 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
741 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
742 SCRATCH_PAD3_SMA_ENABLED)
743 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
744 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
745 SCRATCH_PAD3_SMB_ENABLED)
746 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
747 pm8001_ha->encrypt_info.status = 0;
748 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
749 "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
750 "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
751 scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
752 pm8001_ha->encrypt_info.sec_mode,
753 pm8001_ha->encrypt_info.status));
754 ret = 0;
755 } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
756 SCRATCH_PAD3_ENC_DISABLED) {
757 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
758 "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
759 scratch3_value));
760 pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
761 pm8001_ha->encrypt_info.cipher_mode = 0;
762 pm8001_ha->encrypt_info.sec_mode = 0;
763 return 0;
764 } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
765 SCRATCH_PAD3_ENC_DIS_ERR) {
766 pm8001_ha->encrypt_info.status =
767 (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
768 if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
769 pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
770 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
771 SCRATCH_PAD3_SMF_ENABLED)
772 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
773 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
774 SCRATCH_PAD3_SMA_ENABLED)
775 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
776 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
777 SCRATCH_PAD3_SMB_ENABLED)
778 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
779 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
780 "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
781 "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
782 scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
783 pm8001_ha->encrypt_info.sec_mode,
784 pm8001_ha->encrypt_info.status));
785 ret = -1;
786 } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
787 SCRATCH_PAD3_ENC_ENA_ERR) {
788
789 pm8001_ha->encrypt_info.status =
790 (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
791 if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
792 pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
793 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
794 SCRATCH_PAD3_SMF_ENABLED)
795 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
796 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
797 SCRATCH_PAD3_SMA_ENABLED)
798 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
799 if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
800 SCRATCH_PAD3_SMB_ENABLED)
801 pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
802
803 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
804 "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
805 "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
806 scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
807 pm8001_ha->encrypt_info.sec_mode,
808 pm8001_ha->encrypt_info.status));
809 ret = -1;
810 }
811 return ret;
812}
813
814/**
815 * pm80xx_encrypt_update - update flash with encryption informtion
816 * @pm8001_ha: our hba card information.
817 */
818static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
819{
820 struct kek_mgmt_req payload;
821 struct inbound_queue_table *circularQ;
822 int rc;
823 u32 tag;
824 u32 opc = OPC_INB_KEK_MANAGEMENT;
825
826 memset(&payload, 0, sizeof(struct kek_mgmt_req));
827 rc = pm8001_tag_alloc(pm8001_ha, &tag);
828 if (rc)
829 return -1;
830
831 circularQ = &pm8001_ha->inbnd_q_tbl[0];
832 payload.tag = cpu_to_le32(tag);
833 /* Currently only one key is used. New KEK index is 1.
834 * Current KEK index is 1. Store KEK to NVRAM is 1.
835 */
836 payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
837 KEK_MGMT_SUBOP_KEYCARDUPDATE);
838
839 rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
840
841 return rc;
842}
843
844/**
845 * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
846 * @pm8001_ha: our hba card information
847 */
848static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
849{
850 int ret;
851 u8 i = 0;
852
853 /* check the firmware status */
854 if (-1 == check_fw_ready(pm8001_ha)) {
855 PM8001_FAIL_DBG(pm8001_ha,
856 pm8001_printk("Firmware is not ready!\n"));
857 return -EBUSY;
858 }
859
860 /* Initialize pci space address eg: mpi offset */
861 init_pci_device_addresses(pm8001_ha);
862 init_default_table_values(pm8001_ha);
863 read_main_config_table(pm8001_ha);
864 read_general_status_table(pm8001_ha);
865 read_inbnd_queue_table(pm8001_ha);
866 read_outbnd_queue_table(pm8001_ha);
867 read_phy_attr_table(pm8001_ha);
868
869 /* update main config table ,inbound table and outbound table */
870 update_main_config_table(pm8001_ha);
871 for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
872 update_inbnd_queue_table(pm8001_ha, i);
873 for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
874 update_outbnd_queue_table(pm8001_ha, i);
875
876 /* notify firmware update finished and check initialization status */
877 if (0 == mpi_init_check(pm8001_ha)) {
878 PM8001_INIT_DBG(pm8001_ha,
879 pm8001_printk("MPI initialize successful!\n"));
880 } else
881 return -EBUSY;
882
883 /* send SAS protocol timer configuration page to FW */
884 ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
885
886 /* Check for encryption */
887 if (pm8001_ha->chip->encrypt) {
888 PM8001_INIT_DBG(pm8001_ha,
889 pm8001_printk("Checking for encryption\n"));
890 ret = pm80xx_get_encrypt_info(pm8001_ha);
891 if (ret == -1) {
892 PM8001_INIT_DBG(pm8001_ha,
893 pm8001_printk("Encryption error !!\n"));
894 if (pm8001_ha->encrypt_info.status == 0x81) {
895 PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
896 "Encryption enabled with error."
897 "Saving encryption key to flash\n"));
898 pm80xx_encrypt_update(pm8001_ha);
899 }
900 }
901 }
902 return 0;
903}
904
/**
 * mpi_uninit_check - ask the firmware to stop MPI and wait for it
 * @pm8001_ha: our hba card information
 *
 * Sets the configuration-table-reset bit in the inbound doorbell, polls
 * (up to 2 s, 1 us per step) for the firmware to clear it, then polls the
 * general status table (up to another 2 s) until the MPI state reaches
 * UNINIT.
 *
 * Return: 0 when MPI reached the uninitialized state, -1 on timeout.
 */
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
	u32 max_wait_count;
	u32 value;
	u32 gst_len_mpistate;
	init_pci_device_addresses(pm8001_ha);
	/* Write bit1=1 to the Inbound DoorBell register to tell the SPC
	   firmware to stop the configuration table */
	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);

	/* wait until the firmware clears the doorbell bit */
	max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
	do {
		udelay(1);
		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
		value &= SPCv_MSGU_CFG_TABLE_RESET;
	} while ((value != 0) && (--max_wait_count));

	if (!max_wait_count) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
		return -1;
	}

	/* check the MPI-State for termination in progress */
	/* poll the general status table until MPI reports UNINIT */
	max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */
	do {
		udelay(1);
		gst_len_mpistate =
			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
					GST_GSTLEN_MPIS_OFFSET);
		if (GST_MPI_STATE_UNINIT ==
			(gst_len_mpistate & GST_MPI_STATE_MASK))
			break;
	} while (--max_wait_count);
	if (!max_wait_count) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk(" TIME OUT MPI State = 0x%x\n",
				gst_len_mpistate & GST_MPI_STATE_MASK));
		return -1;
	}

	return 0;
}
950
951/**
952 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
953 * the FW register status to the originated status.
954 * @pm8001_ha: our hba card information
955 */
956
957static int
958pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
959{
960 u32 regval;
961 u32 bootloader_state;
962
963 /* Check if MPI is in ready state to reset */
964 if (mpi_uninit_check(pm8001_ha) != 0) {
965 PM8001_FAIL_DBG(pm8001_ha,
966 pm8001_printk("MPI state is not ready\n"));
967 return -1;
968 }
969
970 /* checked for reset register normal state; 0x0 */
971 regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
972 PM8001_INIT_DBG(pm8001_ha,
973 pm8001_printk("reset register before write : 0x%x\n", regval));
974
975 pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
976 mdelay(500);
977
978 regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
979 PM8001_INIT_DBG(pm8001_ha,
980 pm8001_printk("reset register after write 0x%x\n", regval));
981
982 if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
983 SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
984 PM8001_MSG_DBG(pm8001_ha,
985 pm8001_printk(" soft reset successful [regval: 0x%x]\n",
986 regval));
987 } else {
988 PM8001_MSG_DBG(pm8001_ha,
989 pm8001_printk(" soft reset failed [regval: 0x%x]\n",
990 regval));
991
992 /* check bootloader is successfully executed or in HDA mode */
993 bootloader_state =
994 pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
995 SCRATCH_PAD1_BOOTSTATE_MASK;
996
997 if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
998 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
999 "Bootloader state - HDA mode SEEPROM\n"));
1000 } else if (bootloader_state ==
1001 SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
1002 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
1003 "Bootloader state - HDA mode Bootstrap Pin\n"));
1004 } else if (bootloader_state ==
1005 SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
1006 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
1007 "Bootloader state - HDA mode soft reset\n"));
1008 } else if (bootloader_state ==
1009 SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
1010 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
1011 "Bootloader state-HDA mode critical error\n"));
1012 }
1013 return -EBUSY;
1014 }
1015
1016 /* check the firmware status after reset */
1017 if (-1 == check_fw_ready(pm8001_ha)) {
1018 PM8001_FAIL_DBG(pm8001_ha,
1019 pm8001_printk("Firmware is not ready!\n"));
1020 return -EBUSY;
1021 }
1022 PM8001_INIT_DBG(pm8001_ha,
1023 pm8001_printk("SPCv soft reset Complete\n"));
1024 return 0;
1025}
1026
1027static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
1028{
1029 u32 i;
1030
1031 PM8001_INIT_DBG(pm8001_ha,
1032 pm8001_printk("chip reset start\n"));
1033
1034 /* do SPCv chip reset. */
1035 pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
1036 PM8001_INIT_DBG(pm8001_ha,
1037 pm8001_printk("SPC soft reset Complete\n"));
1038
1039 /* Check this ..whether delay is required or no */
1040 /* delay 10 usec */
1041 udelay(10);
1042
1043 /* wait for 20 msec until the firmware gets reloaded */
1044 i = 20;
1045 do {
1046 mdelay(1);
1047 } while ((--i) != 0);
1048
1049 PM8001_INIT_DBG(pm8001_ha,
1050 pm8001_printk("chip reset finished\n"));
1051}
1052
1053/**
1054 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
1055 * @pm8001_ha: our hba card information
1056 */
1057static void
1058pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
1059{
1060 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
1061 pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
1062}
1063
1064/**
1065 * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
1066 * @pm8001_ha: our hba card information
1067 */
1068static void
1069pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
1070{
1071 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
1072}
1073
1074/**
1075 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
1076 * @pm8001_ha: our hba card information
1077 */
1078static void
1079pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1080{
1081#ifdef PM8001_USE_MSIX
1082 u32 mask;
1083 mask = (u32)(1 << vec);
1084
1085 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
1086 return;
1087#endif
1088 pm80xx_chip_intx_interrupt_enable(pm8001_ha);
1089
1090}
1091
1092/**
1093 * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt
1094 * @pm8001_ha: our hba card information
1095 */
1096static void
1097pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
1098{
1099#ifdef PM8001_USE_MSIX
1100 u32 mask;
1101 if (vec == 0xFF)
1102 mask = 0xFFFFFFFF;
1103 else
1104 mask = (u32)(1 << vec);
1105 pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
1106 return;
1107#endif
1108 pm80xx_chip_intx_interrupt_disable(pm8001_ha);
1109}
1110
1111static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
1112 struct pm8001_device *pm8001_ha_dev)
1113{
1114 int res;
1115 u32 ccb_tag;
1116 struct pm8001_ccb_info *ccb;
1117 struct sas_task *task = NULL;
1118 struct task_abort_req task_abort;
1119 struct inbound_queue_table *circularQ;
1120 u32 opc = OPC_INB_SATA_ABORT;
1121 int ret;
1122
1123 if (!pm8001_ha_dev) {
1124 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
1125 return;
1126 }
1127
1128 task = sas_alloc_slow_task(GFP_ATOMIC);
1129
1130 if (!task) {
1131 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
1132 "allocate task\n"));
1133 return;
1134 }
1135
1136 task->task_done = pm8001_task_done;
1137
1138 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1139 if (res)
1140 return;
1141
1142 ccb = &pm8001_ha->ccb_info[ccb_tag];
1143 ccb->device = pm8001_ha_dev;
1144 ccb->ccb_tag = ccb_tag;
1145 ccb->task = task;
1146
1147 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1148
1149 memset(&task_abort, 0, sizeof(task_abort));
1150 task_abort.abort_all = cpu_to_le32(1);
1151 task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1152 task_abort.tag = cpu_to_le32(ccb_tag);
1153
1154 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
1155
1156}
1157
1158static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
1159 struct pm8001_device *pm8001_ha_dev)
1160{
1161 struct sata_start_req sata_cmd;
1162 int res;
1163 u32 ccb_tag;
1164 struct pm8001_ccb_info *ccb;
1165 struct sas_task *task = NULL;
1166 struct host_to_dev_fis fis;
1167 struct domain_device *dev;
1168 struct inbound_queue_table *circularQ;
1169 u32 opc = OPC_INB_SATA_HOST_OPSTART;
1170
1171 task = sas_alloc_slow_task(GFP_ATOMIC);
1172
1173 if (!task) {
1174 PM8001_FAIL_DBG(pm8001_ha,
1175 pm8001_printk("cannot allocate task !!!\n"));
1176 return;
1177 }
1178 task->task_done = pm8001_task_done;
1179
1180 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
1181 if (res) {
1182 PM8001_FAIL_DBG(pm8001_ha,
1183 pm8001_printk("cannot allocate tag !!!\n"));
1184 return;
1185 }
1186
1187 /* allocate domain device by ourselves as libsas
1188 * is not going to provide any
1189 */
1190 dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
1191 if (!dev) {
1192 PM8001_FAIL_DBG(pm8001_ha,
1193 pm8001_printk("Domain device cannot be allocated\n"));
1194 sas_free_task(task);
1195 return;
1196 } else {
1197 task->dev = dev;
1198 task->dev->lldd_dev = pm8001_ha_dev;
1199 }
1200
1201 ccb = &pm8001_ha->ccb_info[ccb_tag];
1202 ccb->device = pm8001_ha_dev;
1203 ccb->ccb_tag = ccb_tag;
1204 ccb->task = task;
1205 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
1206 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
1207
1208 memset(&sata_cmd, 0, sizeof(sata_cmd));
1209 circularQ = &pm8001_ha->inbnd_q_tbl[0];
1210
1211 /* construct read log FIS */
1212 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1213 fis.fis_type = 0x27;
1214 fis.flags = 0x80;
1215 fis.command = ATA_CMD_READ_LOG_EXT;
1216 fis.lbal = 0x10;
1217 fis.sector_count = 0x1;
1218
1219 sata_cmd.tag = cpu_to_le32(ccb_tag);
1220 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
1221 sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
1222 memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
1223
1224 res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
1225
1226}
1227
1228/**
1229 * mpi_ssp_completion- process the event that FW response to the SSP request.
1230 * @pm8001_ha: our hba card information
1231 * @piomb: the message contents of this outbound message.
1232 *
1233 * When FW has completed a ssp request for example a IO request, after it has
1234 * filled the SG data with the data, it will trigger this event represent
1235 * that he has finished the job,please check the coresponding buffer.
1236 * So we will tell the caller who maybe waiting the result to tell upper layer
1237 * that the task has been finished.
1238 */
1239static void
1240mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1241{
1242 struct sas_task *t;
1243 struct pm8001_ccb_info *ccb;
1244 unsigned long flags;
1245 u32 status;
1246 u32 param;
1247 u32 tag;
1248 struct ssp_completion_resp *psspPayload;
1249 struct task_status_struct *ts;
1250 struct ssp_response_iu *iu;
1251 struct pm8001_device *pm8001_dev;
1252 psspPayload = (struct ssp_completion_resp *)(piomb + 4);
1253 status = le32_to_cpu(psspPayload->status);
1254 tag = le32_to_cpu(psspPayload->tag);
1255 ccb = &pm8001_ha->ccb_info[tag];
1256 if ((status == IO_ABORTED) && ccb->open_retry) {
1257 /* Being completed by another */
1258 ccb->open_retry = 0;
1259 return;
1260 }
1261 pm8001_dev = ccb->device;
1262 param = le32_to_cpu(psspPayload->param);
1263 t = ccb->task;
1264
1265 if (status && status != IO_UNDERFLOW)
1266 PM8001_FAIL_DBG(pm8001_ha,
1267 pm8001_printk("sas IO status 0x%x\n", status));
1268 if (unlikely(!t || !t->lldd_task || !t->dev))
1269 return;
1270 ts = &t->task_status;
1271 switch (status) {
1272 case IO_SUCCESS:
1273 PM8001_IO_DBG(pm8001_ha,
1274 pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
1275 param));
1276 if (param == 0) {
1277 ts->resp = SAS_TASK_COMPLETE;
1278 ts->stat = SAM_STAT_GOOD;
1279 } else {
1280 ts->resp = SAS_TASK_COMPLETE;
1281 ts->stat = SAS_PROTO_RESPONSE;
1282 ts->residual = param;
1283 iu = &psspPayload->ssp_resp_iu;
1284 sas_ssp_task_response(pm8001_ha->dev, t, iu);
1285 }
1286 if (pm8001_dev)
1287 pm8001_dev->running_req--;
1288 break;
1289 case IO_ABORTED:
1290 PM8001_IO_DBG(pm8001_ha,
1291 pm8001_printk("IO_ABORTED IOMB Tag\n"));
1292 ts->resp = SAS_TASK_COMPLETE;
1293 ts->stat = SAS_ABORTED_TASK;
1294 break;
1295 case IO_UNDERFLOW:
1296 /* SSP Completion with error */
1297 PM8001_IO_DBG(pm8001_ha,
1298 pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
1299 param));
1300 ts->resp = SAS_TASK_COMPLETE;
1301 ts->stat = SAS_DATA_UNDERRUN;
1302 ts->residual = param;
1303 if (pm8001_dev)
1304 pm8001_dev->running_req--;
1305 break;
1306 case IO_NO_DEVICE:
1307 PM8001_IO_DBG(pm8001_ha,
1308 pm8001_printk("IO_NO_DEVICE\n"));
1309 ts->resp = SAS_TASK_UNDELIVERED;
1310 ts->stat = SAS_PHY_DOWN;
1311 break;
1312 case IO_XFER_ERROR_BREAK:
1313 PM8001_IO_DBG(pm8001_ha,
1314 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1315 ts->resp = SAS_TASK_COMPLETE;
1316 ts->stat = SAS_OPEN_REJECT;
1317 /* Force the midlayer to retry */
1318 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1319 break;
1320 case IO_XFER_ERROR_PHY_NOT_READY:
1321 PM8001_IO_DBG(pm8001_ha,
1322 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1323 ts->resp = SAS_TASK_COMPLETE;
1324 ts->stat = SAS_OPEN_REJECT;
1325 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1326 break;
1327 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1328 PM8001_IO_DBG(pm8001_ha,
1329 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
1330 ts->resp = SAS_TASK_COMPLETE;
1331 ts->stat = SAS_OPEN_REJECT;
1332 ts->open_rej_reason = SAS_OREJ_EPROTO;
1333 break;
1334 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1335 PM8001_IO_DBG(pm8001_ha,
1336 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1337 ts->resp = SAS_TASK_COMPLETE;
1338 ts->stat = SAS_OPEN_REJECT;
1339 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1340 break;
1341 case IO_OPEN_CNX_ERROR_BREAK:
1342 PM8001_IO_DBG(pm8001_ha,
1343 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
1344 ts->resp = SAS_TASK_COMPLETE;
1345 ts->stat = SAS_OPEN_REJECT;
1346 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1347 break;
1348 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1349 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
1350 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
1351 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
1352 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
1353 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
1354 PM8001_IO_DBG(pm8001_ha,
1355 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
1356 ts->resp = SAS_TASK_COMPLETE;
1357 ts->stat = SAS_OPEN_REJECT;
1358 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1359 if (!t->uldd_task)
1360 pm8001_handle_event(pm8001_ha,
1361 pm8001_dev,
1362 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1363 break;
1364 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
1365 PM8001_IO_DBG(pm8001_ha,
1366 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
1367 ts->resp = SAS_TASK_COMPLETE;
1368 ts->stat = SAS_OPEN_REJECT;
1369 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1370 break;
1371 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
1372 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1373 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
1374 ts->resp = SAS_TASK_COMPLETE;
1375 ts->stat = SAS_OPEN_REJECT;
1376 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1377 break;
1378 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
1379 PM8001_IO_DBG(pm8001_ha,
1380 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
1381 ts->resp = SAS_TASK_UNDELIVERED;
1382 ts->stat = SAS_OPEN_REJECT;
1383 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1384 break;
1385 case IO_XFER_ERROR_NAK_RECEIVED:
1386 PM8001_IO_DBG(pm8001_ha,
1387 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
1388 ts->resp = SAS_TASK_COMPLETE;
1389 ts->stat = SAS_OPEN_REJECT;
1390 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1391 break;
1392 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
1393 PM8001_IO_DBG(pm8001_ha,
1394 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
1395 ts->resp = SAS_TASK_COMPLETE;
1396 ts->stat = SAS_NAK_R_ERR;
1397 break;
1398 case IO_XFER_ERROR_DMA:
1399 PM8001_IO_DBG(pm8001_ha,
1400 pm8001_printk("IO_XFER_ERROR_DMA\n"));
1401 ts->resp = SAS_TASK_COMPLETE;
1402 ts->stat = SAS_OPEN_REJECT;
1403 break;
1404 case IO_XFER_OPEN_RETRY_TIMEOUT:
1405 PM8001_IO_DBG(pm8001_ha,
1406 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
1407 ts->resp = SAS_TASK_COMPLETE;
1408 ts->stat = SAS_OPEN_REJECT;
1409 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1410 break;
1411 case IO_XFER_ERROR_OFFSET_MISMATCH:
1412 PM8001_IO_DBG(pm8001_ha,
1413 pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
1414 ts->resp = SAS_TASK_COMPLETE;
1415 ts->stat = SAS_OPEN_REJECT;
1416 break;
1417 case IO_PORT_IN_RESET:
1418 PM8001_IO_DBG(pm8001_ha,
1419 pm8001_printk("IO_PORT_IN_RESET\n"));
1420 ts->resp = SAS_TASK_COMPLETE;
1421 ts->stat = SAS_OPEN_REJECT;
1422 break;
1423 case IO_DS_NON_OPERATIONAL:
1424 PM8001_IO_DBG(pm8001_ha,
1425 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
1426 ts->resp = SAS_TASK_COMPLETE;
1427 ts->stat = SAS_OPEN_REJECT;
1428 if (!t->uldd_task)
1429 pm8001_handle_event(pm8001_ha,
1430 pm8001_dev,
1431 IO_DS_NON_OPERATIONAL);
1432 break;
1433 case IO_DS_IN_RECOVERY:
1434 PM8001_IO_DBG(pm8001_ha,
1435 pm8001_printk("IO_DS_IN_RECOVERY\n"));
1436 ts->resp = SAS_TASK_COMPLETE;
1437 ts->stat = SAS_OPEN_REJECT;
1438 break;
1439 case IO_TM_TAG_NOT_FOUND:
1440 PM8001_IO_DBG(pm8001_ha,
1441 pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
1442 ts->resp = SAS_TASK_COMPLETE;
1443 ts->stat = SAS_OPEN_REJECT;
1444 break;
1445 case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
1446 PM8001_IO_DBG(pm8001_ha,
1447 pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
1448 ts->resp = SAS_TASK_COMPLETE;
1449 ts->stat = SAS_OPEN_REJECT;
1450 break;
1451 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
1452 PM8001_IO_DBG(pm8001_ha,
1453 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
1454 ts->resp = SAS_TASK_COMPLETE;
1455 ts->stat = SAS_OPEN_REJECT;
1456 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1457 break;
1458 default:
1459 PM8001_IO_DBG(pm8001_ha,
1460 pm8001_printk("Unknown status 0x%x\n", status));
1461 /* not allowed case. Therefore, return failed status */
1462 ts->resp = SAS_TASK_COMPLETE;
1463 ts->stat = SAS_OPEN_REJECT;
1464 break;
1465 }
1466 PM8001_IO_DBG(pm8001_ha,
1467 pm8001_printk("scsi_status = 0x%x\n ",
1468 psspPayload->ssp_resp_iu.status));
1469 spin_lock_irqsave(&t->task_state_lock, flags);
1470 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
1471 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
1472 t->task_state_flags |= SAS_TASK_STATE_DONE;
1473 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
1474 spin_unlock_irqrestore(&t->task_state_lock, flags);
1475 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
1476 "task 0x%p done with io_status 0x%x resp 0x%x "
1477 "stat 0x%x but aborted by upper layer!\n",
1478 t, status, ts->resp, ts->stat));
1479 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1480 } else {
1481 spin_unlock_irqrestore(&t->task_state_lock, flags);
1482 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1483 mb();/* in order to force CPU ordering */
1484 t->task_done(t);
1485 }
1486}
1487
1488/*See the comments for mpi_ssp_completion */
1489static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
1490{
1491 struct sas_task *t;
1492 unsigned long flags;
1493 struct task_status_struct *ts;
1494 struct pm8001_ccb_info *ccb;
1495 struct pm8001_device *pm8001_dev;
1496 struct ssp_event_resp *psspPayload =
1497 (struct ssp_event_resp *)(piomb + 4);
1498 u32 event = le32_to_cpu(psspPayload->event);
1499 u32 tag = le32_to_cpu(psspPayload->tag);
1500 u32 port_id = le32_to_cpu(psspPayload->port_id);
1501
1502 ccb = &pm8001_ha->ccb_info[tag];
1503 t = ccb->task;
1504 pm8001_dev = ccb->device;
1505 if (event)
1506 PM8001_FAIL_DBG(pm8001_ha,
1507 pm8001_printk("sas IO status 0x%x\n", event));
1508 if (unlikely(!t || !t->lldd_task || !t->dev))
1509 return;
1510 ts = &t->task_status;
1511 PM8001_IO_DBG(pm8001_ha,
1512 pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
1513 port_id, tag, event));
1514 switch (event) {
1515 case IO_OVERFLOW:
1516 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
1517 ts->resp = SAS_TASK_COMPLETE;
1518 ts->stat = SAS_DATA_OVERRUN;
1519 ts->residual = 0;
1520 if (pm8001_dev)
1521 pm8001_dev->running_req--;
1522 break;
1523 case IO_XFER_ERROR_BREAK:
1524 PM8001_IO_DBG(pm8001_ha,
1525 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1526 pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
1527 return;
1528 case IO_XFER_ERROR_PHY_NOT_READY:
1529 PM8001_IO_DBG(pm8001_ha,
1530 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1531 ts->resp = SAS_TASK_COMPLETE;
1532 ts->stat = SAS_OPEN_REJECT;
1533 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1534 break;
1535 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1536 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1537 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
1538 ts->resp = SAS_TASK_COMPLETE;
1539 ts->stat = SAS_OPEN_REJECT;
1540 ts->open_rej_reason = SAS_OREJ_EPROTO;
1541 break;
1542 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1543 PM8001_IO_DBG(pm8001_ha,
1544 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1545 ts->resp = SAS_TASK_COMPLETE;
1546 ts->stat = SAS_OPEN_REJECT;
1547 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1548 break;
1549 case IO_OPEN_CNX_ERROR_BREAK:
1550 PM8001_IO_DBG(pm8001_ha,
1551 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
1552 ts->resp = SAS_TASK_COMPLETE;
1553 ts->stat = SAS_OPEN_REJECT;
1554 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1555 break;
1556 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1557 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
1558 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
1559 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
1560 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
1561 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
1562 PM8001_IO_DBG(pm8001_ha,
1563 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
1564 ts->resp = SAS_TASK_COMPLETE;
1565 ts->stat = SAS_OPEN_REJECT;
1566 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1567 if (!t->uldd_task)
1568 pm8001_handle_event(pm8001_ha,
1569 pm8001_dev,
1570 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1571 break;
1572 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
1573 PM8001_IO_DBG(pm8001_ha,
1574 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
1575 ts->resp = SAS_TASK_COMPLETE;
1576 ts->stat = SAS_OPEN_REJECT;
1577 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1578 break;
1579 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
1580 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1581 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
1582 ts->resp = SAS_TASK_COMPLETE;
1583 ts->stat = SAS_OPEN_REJECT;
1584 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1585 break;
1586 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
1587 PM8001_IO_DBG(pm8001_ha,
1588 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
1589 ts->resp = SAS_TASK_COMPLETE;
1590 ts->stat = SAS_OPEN_REJECT;
1591 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1592 break;
1593 case IO_XFER_ERROR_NAK_RECEIVED:
1594 PM8001_IO_DBG(pm8001_ha,
1595 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
1596 ts->resp = SAS_TASK_COMPLETE;
1597 ts->stat = SAS_OPEN_REJECT;
1598 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1599 break;
1600 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
1601 PM8001_IO_DBG(pm8001_ha,
1602 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
1603 ts->resp = SAS_TASK_COMPLETE;
1604 ts->stat = SAS_NAK_R_ERR;
1605 break;
1606 case IO_XFER_OPEN_RETRY_TIMEOUT:
1607 PM8001_IO_DBG(pm8001_ha,
1608 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
1609 pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
1610 return;
1611 case IO_XFER_ERROR_UNEXPECTED_PHASE:
1612 PM8001_IO_DBG(pm8001_ha,
1613 pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
1614 ts->resp = SAS_TASK_COMPLETE;
1615 ts->stat = SAS_DATA_OVERRUN;
1616 break;
1617 case IO_XFER_ERROR_XFER_RDY_OVERRUN:
1618 PM8001_IO_DBG(pm8001_ha,
1619 pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
1620 ts->resp = SAS_TASK_COMPLETE;
1621 ts->stat = SAS_DATA_OVERRUN;
1622 break;
1623 case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
1624 PM8001_IO_DBG(pm8001_ha,
1625 pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
1626 ts->resp = SAS_TASK_COMPLETE;
1627 ts->stat = SAS_DATA_OVERRUN;
1628 break;
1629 case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
1630 PM8001_IO_DBG(pm8001_ha,
1631 pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
1632 ts->resp = SAS_TASK_COMPLETE;
1633 ts->stat = SAS_DATA_OVERRUN;
1634 break;
1635 case IO_XFER_ERROR_OFFSET_MISMATCH:
1636 PM8001_IO_DBG(pm8001_ha,
1637 pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
1638 ts->resp = SAS_TASK_COMPLETE;
1639 ts->stat = SAS_DATA_OVERRUN;
1640 break;
1641 case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
1642 PM8001_IO_DBG(pm8001_ha,
1643 pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
1644 ts->resp = SAS_TASK_COMPLETE;
1645 ts->stat = SAS_DATA_OVERRUN;
1646 break;
1647 case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
1648 PM8001_IO_DBG(pm8001_ha,
1649 pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
1650 /* TBC: used default set values */
1651 ts->resp = SAS_TASK_COMPLETE;
1652 ts->stat = SAS_DATA_OVERRUN;
1653 break;
1654 case IO_XFER_CMD_FRAME_ISSUED:
1655 PM8001_IO_DBG(pm8001_ha,
1656 pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
1657 return;
1658 default:
1659 PM8001_IO_DBG(pm8001_ha,
1660 pm8001_printk("Unknown status 0x%x\n", event));
1661 /* not allowed case. Therefore, return failed status */
1662 ts->resp = SAS_TASK_COMPLETE;
1663 ts->stat = SAS_DATA_OVERRUN;
1664 break;
1665 }
1666 spin_lock_irqsave(&t->task_state_lock, flags);
1667 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
1668 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
1669 t->task_state_flags |= SAS_TASK_STATE_DONE;
1670 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
1671 spin_unlock_irqrestore(&t->task_state_lock, flags);
1672 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
1673 "task 0x%p done with event 0x%x resp 0x%x "
1674 "stat 0x%x but aborted by upper layer!\n",
1675 t, event, ts->resp, ts->stat));
1676 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1677 } else {
1678 spin_unlock_irqrestore(&t->task_state_lock, flags);
1679 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1680 mb();/* in order to force CPU ordering */
1681 t->task_done(t);
1682 }
1683}
1684
1685/*See the comments for mpi_ssp_completion */
1686static void
1687mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1688{
1689 struct sas_task *t;
1690 struct pm8001_ccb_info *ccb;
1691 u32 param;
1692 u32 status;
1693 u32 tag;
1694 struct sata_completion_resp *psataPayload;
1695 struct task_status_struct *ts;
1696 struct ata_task_resp *resp ;
1697 u32 *sata_resp;
1698 struct pm8001_device *pm8001_dev;
1699 unsigned long flags;
1700
1701 psataPayload = (struct sata_completion_resp *)(piomb + 4);
1702 status = le32_to_cpu(psataPayload->status);
1703 tag = le32_to_cpu(psataPayload->tag);
1704
1705 if (!tag) {
1706 PM8001_FAIL_DBG(pm8001_ha,
1707 pm8001_printk("tag null\n"));
1708 return;
1709 }
1710 ccb = &pm8001_ha->ccb_info[tag];
1711 param = le32_to_cpu(psataPayload->param);
1712 if (ccb) {
1713 t = ccb->task;
1714 pm8001_dev = ccb->device;
1715 } else {
1716 PM8001_FAIL_DBG(pm8001_ha,
1717 pm8001_printk("ccb null\n"));
1718 return;
1719 }
1720
1721 if (t) {
1722 if (t->dev && (t->dev->lldd_dev))
1723 pm8001_dev = t->dev->lldd_dev;
1724 } else {
1725 PM8001_FAIL_DBG(pm8001_ha,
1726 pm8001_printk("task null\n"));
1727 return;
1728 }
1729
1730 if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
1731 && unlikely(!t || !t->lldd_task || !t->dev)) {
1732 PM8001_FAIL_DBG(pm8001_ha,
1733 pm8001_printk("task or dev null\n"));
1734 return;
1735 }
1736
1737 ts = &t->task_status;
1738 if (!ts) {
1739 PM8001_FAIL_DBG(pm8001_ha,
1740 pm8001_printk("ts null\n"));
1741 return;
1742 }
1743
1744 switch (status) {
1745 case IO_SUCCESS:
1746 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
1747 if (param == 0) {
1748 ts->resp = SAS_TASK_COMPLETE;
1749 ts->stat = SAM_STAT_GOOD;
1750 /* check if response is for SEND READ LOG */
1751 if (pm8001_dev &&
1752 (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
1753 /* set new bit for abort_all */
1754 pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
1755 /* clear bit for read log */
1756 pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
1757 pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
1758 /* Free the tag */
1759 pm8001_tag_free(pm8001_ha, tag);
1760 sas_free_task(t);
1761 return;
1762 }
1763 } else {
1764 u8 len;
1765 ts->resp = SAS_TASK_COMPLETE;
1766 ts->stat = SAS_PROTO_RESPONSE;
1767 ts->residual = param;
1768 PM8001_IO_DBG(pm8001_ha,
1769 pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
1770 param));
1771 sata_resp = &psataPayload->sata_resp[0];
1772 resp = (struct ata_task_resp *)ts->buf;
1773 if (t->ata_task.dma_xfer == 0 &&
1774 t->data_dir == PCI_DMA_FROMDEVICE) {
1775 len = sizeof(struct pio_setup_fis);
1776 PM8001_IO_DBG(pm8001_ha,
1777 pm8001_printk("PIO read len = %d\n", len));
1778 } else if (t->ata_task.use_ncq) {
1779 len = sizeof(struct set_dev_bits_fis);
1780 PM8001_IO_DBG(pm8001_ha,
1781 pm8001_printk("FPDMA len = %d\n", len));
1782 } else {
1783 len = sizeof(struct dev_to_host_fis);
1784 PM8001_IO_DBG(pm8001_ha,
1785 pm8001_printk("other len = %d\n", len));
1786 }
1787 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
1788 resp->frame_len = len;
1789 memcpy(&resp->ending_fis[0], sata_resp, len);
1790 ts->buf_valid_size = sizeof(*resp);
1791 } else
1792 PM8001_IO_DBG(pm8001_ha,
1793 pm8001_printk("response to large\n"));
1794 }
1795 if (pm8001_dev)
1796 pm8001_dev->running_req--;
1797 break;
1798 case IO_ABORTED:
1799 PM8001_IO_DBG(pm8001_ha,
1800 pm8001_printk("IO_ABORTED IOMB Tag\n"));
1801 ts->resp = SAS_TASK_COMPLETE;
1802 ts->stat = SAS_ABORTED_TASK;
1803 if (pm8001_dev)
1804 pm8001_dev->running_req--;
1805 break;
1806 /* following cases are to do cases */
1807 case IO_UNDERFLOW:
1808 /* SATA Completion with error */
1809 PM8001_IO_DBG(pm8001_ha,
1810 pm8001_printk("IO_UNDERFLOW param = %d\n", param));
1811 ts->resp = SAS_TASK_COMPLETE;
1812 ts->stat = SAS_DATA_UNDERRUN;
1813 ts->residual = param;
1814 if (pm8001_dev)
1815 pm8001_dev->running_req--;
1816 break;
1817 case IO_NO_DEVICE:
1818 PM8001_IO_DBG(pm8001_ha,
1819 pm8001_printk("IO_NO_DEVICE\n"));
1820 ts->resp = SAS_TASK_UNDELIVERED;
1821 ts->stat = SAS_PHY_DOWN;
1822 break;
1823 case IO_XFER_ERROR_BREAK:
1824 PM8001_IO_DBG(pm8001_ha,
1825 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1826 ts->resp = SAS_TASK_COMPLETE;
1827 ts->stat = SAS_INTERRUPTED;
1828 break;
1829 case IO_XFER_ERROR_PHY_NOT_READY:
1830 PM8001_IO_DBG(pm8001_ha,
1831 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
1832 ts->resp = SAS_TASK_COMPLETE;
1833 ts->stat = SAS_OPEN_REJECT;
1834 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1835 break;
1836 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
1837 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1838 "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
1839 ts->resp = SAS_TASK_COMPLETE;
1840 ts->stat = SAS_OPEN_REJECT;
1841 ts->open_rej_reason = SAS_OREJ_EPROTO;
1842 break;
1843 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
1844 PM8001_IO_DBG(pm8001_ha,
1845 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
1846 ts->resp = SAS_TASK_COMPLETE;
1847 ts->stat = SAS_OPEN_REJECT;
1848 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
1849 break;
1850 case IO_OPEN_CNX_ERROR_BREAK:
1851 PM8001_IO_DBG(pm8001_ha,
1852 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
1853 ts->resp = SAS_TASK_COMPLETE;
1854 ts->stat = SAS_OPEN_REJECT;
1855 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
1856 break;
1857 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1858 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
1859 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
1860 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
1861 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
1862 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
1863 PM8001_IO_DBG(pm8001_ha,
1864 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
1865 ts->resp = SAS_TASK_COMPLETE;
1866 ts->stat = SAS_DEV_NO_RESPONSE;
1867 if (!t->uldd_task) {
1868 pm8001_handle_event(pm8001_ha,
1869 pm8001_dev,
1870 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1871 ts->resp = SAS_TASK_UNDELIVERED;
1872 ts->stat = SAS_QUEUE_FULL;
1873 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1874 mb();/*in order to force CPU ordering*/
1875 spin_unlock_irq(&pm8001_ha->lock);
1876 t->task_done(t);
1877 spin_lock_irq(&pm8001_ha->lock);
1878 return;
1879 }
1880 break;
1881 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
1882 PM8001_IO_DBG(pm8001_ha,
1883 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
1884 ts->resp = SAS_TASK_UNDELIVERED;
1885 ts->stat = SAS_OPEN_REJECT;
1886 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
1887 if (!t->uldd_task) {
1888 pm8001_handle_event(pm8001_ha,
1889 pm8001_dev,
1890 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
1891 ts->resp = SAS_TASK_UNDELIVERED;
1892 ts->stat = SAS_QUEUE_FULL;
1893 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1894 mb();/*ditto*/
1895 spin_unlock_irq(&pm8001_ha->lock);
1896 t->task_done(t);
1897 spin_lock_irq(&pm8001_ha->lock);
1898 return;
1899 }
1900 break;
1901 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
1902 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1903 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
1904 ts->resp = SAS_TASK_COMPLETE;
1905 ts->stat = SAS_OPEN_REJECT;
1906 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
1907 break;
1908 case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
1909 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
1910 "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
1911 ts->resp = SAS_TASK_COMPLETE;
1912 ts->stat = SAS_DEV_NO_RESPONSE;
1913 if (!t->uldd_task) {
1914 pm8001_handle_event(pm8001_ha,
1915 pm8001_dev,
1916 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
1917 ts->resp = SAS_TASK_UNDELIVERED;
1918 ts->stat = SAS_QUEUE_FULL;
1919 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1920 mb();/* ditto*/
1921 spin_unlock_irq(&pm8001_ha->lock);
1922 t->task_done(t);
1923 spin_lock_irq(&pm8001_ha->lock);
1924 return;
1925 }
1926 break;
1927 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
1928 PM8001_IO_DBG(pm8001_ha,
1929 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
1930 ts->resp = SAS_TASK_COMPLETE;
1931 ts->stat = SAS_OPEN_REJECT;
1932 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
1933 break;
1934 case IO_XFER_ERROR_NAK_RECEIVED:
1935 PM8001_IO_DBG(pm8001_ha,
1936 pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
1937 ts->resp = SAS_TASK_COMPLETE;
1938 ts->stat = SAS_NAK_R_ERR;
1939 break;
1940 case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
1941 PM8001_IO_DBG(pm8001_ha,
1942 pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
1943 ts->resp = SAS_TASK_COMPLETE;
1944 ts->stat = SAS_NAK_R_ERR;
1945 break;
1946 case IO_XFER_ERROR_DMA:
1947 PM8001_IO_DBG(pm8001_ha,
1948 pm8001_printk("IO_XFER_ERROR_DMA\n"));
1949 ts->resp = SAS_TASK_COMPLETE;
1950 ts->stat = SAS_ABORTED_TASK;
1951 break;
1952 case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
1953 PM8001_IO_DBG(pm8001_ha,
1954 pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
1955 ts->resp = SAS_TASK_UNDELIVERED;
1956 ts->stat = SAS_DEV_NO_RESPONSE;
1957 break;
1958 case IO_XFER_ERROR_REJECTED_NCQ_MODE:
1959 PM8001_IO_DBG(pm8001_ha,
1960 pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
1961 ts->resp = SAS_TASK_COMPLETE;
1962 ts->stat = SAS_DATA_UNDERRUN;
1963 break;
1964 case IO_XFER_OPEN_RETRY_TIMEOUT:
1965 PM8001_IO_DBG(pm8001_ha,
1966 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
1967 ts->resp = SAS_TASK_COMPLETE;
1968 ts->stat = SAS_OPEN_TO;
1969 break;
1970 case IO_PORT_IN_RESET:
1971 PM8001_IO_DBG(pm8001_ha,
1972 pm8001_printk("IO_PORT_IN_RESET\n"));
1973 ts->resp = SAS_TASK_COMPLETE;
1974 ts->stat = SAS_DEV_NO_RESPONSE;
1975 break;
1976 case IO_DS_NON_OPERATIONAL:
1977 PM8001_IO_DBG(pm8001_ha,
1978 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
1979 ts->resp = SAS_TASK_COMPLETE;
1980 ts->stat = SAS_DEV_NO_RESPONSE;
1981 if (!t->uldd_task) {
1982 pm8001_handle_event(pm8001_ha, pm8001_dev,
1983 IO_DS_NON_OPERATIONAL);
1984 ts->resp = SAS_TASK_UNDELIVERED;
1985 ts->stat = SAS_QUEUE_FULL;
1986 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1987 mb();/*ditto*/
1988 spin_unlock_irq(&pm8001_ha->lock);
1989 t->task_done(t);
1990 spin_lock_irq(&pm8001_ha->lock);
1991 return;
1992 }
1993 break;
1994 case IO_DS_IN_RECOVERY:
1995 PM8001_IO_DBG(pm8001_ha,
1996 pm8001_printk("IO_DS_IN_RECOVERY\n"));
1997 ts->resp = SAS_TASK_COMPLETE;
1998 ts->stat = SAS_DEV_NO_RESPONSE;
1999 break;
2000 case IO_DS_IN_ERROR:
2001 PM8001_IO_DBG(pm8001_ha,
2002 pm8001_printk("IO_DS_IN_ERROR\n"));
2003 ts->resp = SAS_TASK_COMPLETE;
2004 ts->stat = SAS_DEV_NO_RESPONSE;
2005 if (!t->uldd_task) {
2006 pm8001_handle_event(pm8001_ha, pm8001_dev,
2007 IO_DS_IN_ERROR);
2008 ts->resp = SAS_TASK_UNDELIVERED;
2009 ts->stat = SAS_QUEUE_FULL;
2010 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2011 mb();/*ditto*/
2012 spin_unlock_irq(&pm8001_ha->lock);
2013 t->task_done(t);
2014 spin_lock_irq(&pm8001_ha->lock);
2015 return;
2016 }
2017 break;
2018 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2019 PM8001_IO_DBG(pm8001_ha,
2020 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
2021 ts->resp = SAS_TASK_COMPLETE;
2022 ts->stat = SAS_OPEN_REJECT;
2023 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2024 default:
2025 PM8001_IO_DBG(pm8001_ha,
2026 pm8001_printk("Unknown status 0x%x\n", status));
2027 /* not allowed case. Therefore, return failed status */
2028 ts->resp = SAS_TASK_COMPLETE;
2029 ts->stat = SAS_DEV_NO_RESPONSE;
2030 break;
2031 }
2032 spin_lock_irqsave(&t->task_state_lock, flags);
2033 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2034 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2035 t->task_state_flags |= SAS_TASK_STATE_DONE;
2036 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2037 spin_unlock_irqrestore(&t->task_state_lock, flags);
2038 PM8001_FAIL_DBG(pm8001_ha,
2039 pm8001_printk("task 0x%p done with io_status 0x%x"
2040 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2041 t, status, ts->resp, ts->stat));
2042 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2043 } else if (t->uldd_task) {
2044 spin_unlock_irqrestore(&t->task_state_lock, flags);
2045 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2046 mb();/* ditto */
2047 spin_unlock_irq(&pm8001_ha->lock);
2048 t->task_done(t);
2049 spin_lock_irq(&pm8001_ha->lock);
2050 } else if (!t->uldd_task) {
2051 spin_unlock_irqrestore(&t->task_state_lock, flags);
2052 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2053 mb();/*ditto*/
2054 spin_unlock_irq(&pm8001_ha->lock);
2055 t->task_done(t);
2056 spin_lock_irq(&pm8001_ha->lock);
2057 }
2058}
2059
/*
 * mpi_sata_event - handle a SATA event IOMB from the firmware.
 * See the comments for mpi_ssp_completion.
 *
 * The event code is mapped onto libsas task_status fields; NCQ errors
 * are special-cased to kick off READ LOG EXT based recovery instead of
 * completing the task.
 */
static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
	struct sas_task *t;
	struct task_status_struct *ts;
	struct pm8001_ccb_info *ccb;
	struct pm8001_device *pm8001_dev;
	struct sata_event_resp *psataPayload =
		(struct sata_event_resp *)(piomb + 4);
	u32 event = le32_to_cpu(psataPayload->event);
	u32 tag = le32_to_cpu(psataPayload->tag);
	u32 port_id = le32_to_cpu(psataPayload->port_id);
	u32 dev_id = le32_to_cpu(psataPayload->device_id);
	unsigned long flags;

	ccb = &pm8001_ha->ccb_info[tag];

	/*
	 * NOTE(review): &array[tag] can never be NULL, so the else branch
	 * below is dead code; tag itself is not validated here — confirm
	 * against the firmware spec whether a 0 tag can arrive.
	 */
	if (ccb) {
		t = ccb->task;
		pm8001_dev = ccb->device;
	} else {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("No CCB !!!. returning\n"));
		return;
	}
	if (event)
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("SATA EVENT 0x%x\n", event));

	/* Check if this is NCQ error */
	if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
		/* find device using device id */
		pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
		/* send read log extension */
		if (pm8001_dev)
			pm80xx_send_read_log(pm8001_ha, pm8001_dev);
		return;
	}

	if (unlikely(!t || !t->lldd_task || !t->dev)) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("task or dev null\n"));
		return;
	}

	ts = &t->task_status;
	PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
		port_id, tag, event));
	switch (event) {
	case IO_OVERFLOW:
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		ts->residual = 0;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_XFER_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_INTERRUPTED;
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		if (!t->uldd_task) {
			/*
			 * Internal command: report the event and complete
			 * the task here; the HBA lock is dropped around
			 * ->task_done() as libsas may re-enter the LLDD.
			 */
			pm8001_handle_event(pm8001_ha,
				pm8001_dev,
				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			mb();/*ditto*/
			spin_unlock_irq(&pm8001_ha->lock);
			t->task_done(t);
			spin_lock_irq(&pm8001_ha->lock);
			return;
		}
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_PEER_ABORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_UNEXPECTED_PHASE:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_OFFSET_MISMATCH:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_CMD_FRAME_ISSUED:
		/* informational only: ts is left untouched here */
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
		break;
	case IO_XFER_PIO_SETUP_ERROR:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
		/* TBC: used default set values */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_DMA_ACTIVATE_TIMEOUT:
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n"));
		/* TBC: used default set values */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	default:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("Unknown status 0x%x\n", event));
		/* not allowed case. Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	}
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("task 0x%p done with io_status 0x%x"
			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
			t, event, ts->resp, ts->stat));
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else if (t->uldd_task) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* ditto */
		spin_unlock_irq(&pm8001_ha->lock);
		t->task_done(t);
		spin_lock_irq(&pm8001_ha->lock);
	} else if (!t->uldd_task) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/*ditto*/
		spin_unlock_irq(&pm8001_ha->lock);
		t->task_done(t);
		spin_lock_irq(&pm8001_ha->lock);
	}
}
2310
2311/*See the comments for mpi_ssp_completion */
2312static void
2313mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2314{
2315 u32 param, i;
2316 struct sas_task *t;
2317 struct pm8001_ccb_info *ccb;
2318 unsigned long flags;
2319 u32 status;
2320 u32 tag;
2321 struct smp_completion_resp *psmpPayload;
2322 struct task_status_struct *ts;
2323 struct pm8001_device *pm8001_dev;
2324 char *pdma_respaddr = NULL;
2325
2326 psmpPayload = (struct smp_completion_resp *)(piomb + 4);
2327 status = le32_to_cpu(psmpPayload->status);
2328 tag = le32_to_cpu(psmpPayload->tag);
2329
2330 ccb = &pm8001_ha->ccb_info[tag];
2331 param = le32_to_cpu(psmpPayload->param);
2332 t = ccb->task;
2333 ts = &t->task_status;
2334 pm8001_dev = ccb->device;
2335 if (status)
2336 PM8001_FAIL_DBG(pm8001_ha,
2337 pm8001_printk("smp IO status 0x%x\n", status));
2338 if (unlikely(!t || !t->lldd_task || !t->dev))
2339 return;
2340
2341 switch (status) {
2342
2343 case IO_SUCCESS:
2344 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
2345 ts->resp = SAS_TASK_COMPLETE;
2346 ts->stat = SAM_STAT_GOOD;
2347 if (pm8001_dev)
2348 pm8001_dev->running_req--;
2349 if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
2350 PM8001_IO_DBG(pm8001_ha,
2351 pm8001_printk("DIRECT RESPONSE Length:%d\n",
2352 param));
2353 pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
2354 ((u64)sg_dma_address
2355 (&t->smp_task.smp_resp))));
2356 for (i = 0; i < param; i++) {
2357 *(pdma_respaddr+i) = psmpPayload->_r_a[i];
2358 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2359 "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
2360 i, *(pdma_respaddr+i),
2361 psmpPayload->_r_a[i]));
2362 }
2363 }
2364 break;
2365 case IO_ABORTED:
2366 PM8001_IO_DBG(pm8001_ha,
2367 pm8001_printk("IO_ABORTED IOMB\n"));
2368 ts->resp = SAS_TASK_COMPLETE;
2369 ts->stat = SAS_ABORTED_TASK;
2370 if (pm8001_dev)
2371 pm8001_dev->running_req--;
2372 break;
2373 case IO_OVERFLOW:
2374 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
2375 ts->resp = SAS_TASK_COMPLETE;
2376 ts->stat = SAS_DATA_OVERRUN;
2377 ts->residual = 0;
2378 if (pm8001_dev)
2379 pm8001_dev->running_req--;
2380 break;
2381 case IO_NO_DEVICE:
2382 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
2383 ts->resp = SAS_TASK_COMPLETE;
2384 ts->stat = SAS_PHY_DOWN;
2385 break;
2386 case IO_ERROR_HW_TIMEOUT:
2387 PM8001_IO_DBG(pm8001_ha,
2388 pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
2389 ts->resp = SAS_TASK_COMPLETE;
2390 ts->stat = SAM_STAT_BUSY;
2391 break;
2392 case IO_XFER_ERROR_BREAK:
2393 PM8001_IO_DBG(pm8001_ha,
2394 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
2395 ts->resp = SAS_TASK_COMPLETE;
2396 ts->stat = SAM_STAT_BUSY;
2397 break;
2398 case IO_XFER_ERROR_PHY_NOT_READY:
2399 PM8001_IO_DBG(pm8001_ha,
2400 pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
2401 ts->resp = SAS_TASK_COMPLETE;
2402 ts->stat = SAM_STAT_BUSY;
2403 break;
2404 case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
2405 PM8001_IO_DBG(pm8001_ha,
2406 pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
2407 ts->resp = SAS_TASK_COMPLETE;
2408 ts->stat = SAS_OPEN_REJECT;
2409 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2410 break;
2411 case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
2412 PM8001_IO_DBG(pm8001_ha,
2413 pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
2414 ts->resp = SAS_TASK_COMPLETE;
2415 ts->stat = SAS_OPEN_REJECT;
2416 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2417 break;
2418 case IO_OPEN_CNX_ERROR_BREAK:
2419 PM8001_IO_DBG(pm8001_ha,
2420 pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
2421 ts->resp = SAS_TASK_COMPLETE;
2422 ts->stat = SAS_OPEN_REJECT;
2423 ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
2424 break;
2425 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2426 case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
2427 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
2428 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
2429 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
2430 case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
2431 PM8001_IO_DBG(pm8001_ha,
2432 pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
2433 ts->resp = SAS_TASK_COMPLETE;
2434 ts->stat = SAS_OPEN_REJECT;
2435 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
2436 pm8001_handle_event(pm8001_ha,
2437 pm8001_dev,
2438 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
2439 break;
2440 case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
2441 PM8001_IO_DBG(pm8001_ha,
2442 pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
2443 ts->resp = SAS_TASK_COMPLETE;
2444 ts->stat = SAS_OPEN_REJECT;
2445 ts->open_rej_reason = SAS_OREJ_BAD_DEST;
2446 break;
2447 case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2448 PM8001_IO_DBG(pm8001_ha, pm8001_printk(\
2449 "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
2450 ts->resp = SAS_TASK_COMPLETE;
2451 ts->stat = SAS_OPEN_REJECT;
2452 ts->open_rej_reason = SAS_OREJ_CONN_RATE;
2453 break;
2454 case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
2455 PM8001_IO_DBG(pm8001_ha,
2456 pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
2457 ts->resp = SAS_TASK_COMPLETE;
2458 ts->stat = SAS_OPEN_REJECT;
2459 ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
2460 break;
2461 case IO_XFER_ERROR_RX_FRAME:
2462 PM8001_IO_DBG(pm8001_ha,
2463 pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
2464 ts->resp = SAS_TASK_COMPLETE;
2465 ts->stat = SAS_DEV_NO_RESPONSE;
2466 break;
2467 case IO_XFER_OPEN_RETRY_TIMEOUT:
2468 PM8001_IO_DBG(pm8001_ha,
2469 pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
2470 ts->resp = SAS_TASK_COMPLETE;
2471 ts->stat = SAS_OPEN_REJECT;
2472 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2473 break;
2474 case IO_ERROR_INTERNAL_SMP_RESOURCE:
2475 PM8001_IO_DBG(pm8001_ha,
2476 pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
2477 ts->resp = SAS_TASK_COMPLETE;
2478 ts->stat = SAS_QUEUE_FULL;
2479 break;
2480 case IO_PORT_IN_RESET:
2481 PM8001_IO_DBG(pm8001_ha,
2482 pm8001_printk("IO_PORT_IN_RESET\n"));
2483 ts->resp = SAS_TASK_COMPLETE;
2484 ts->stat = SAS_OPEN_REJECT;
2485 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2486 break;
2487 case IO_DS_NON_OPERATIONAL:
2488 PM8001_IO_DBG(pm8001_ha,
2489 pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
2490 ts->resp = SAS_TASK_COMPLETE;
2491 ts->stat = SAS_DEV_NO_RESPONSE;
2492 break;
2493 case IO_DS_IN_RECOVERY:
2494 PM8001_IO_DBG(pm8001_ha,
2495 pm8001_printk("IO_DS_IN_RECOVERY\n"));
2496 ts->resp = SAS_TASK_COMPLETE;
2497 ts->stat = SAS_OPEN_REJECT;
2498 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2499 break;
2500 case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
2501 PM8001_IO_DBG(pm8001_ha,
2502 pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
2503 ts->resp = SAS_TASK_COMPLETE;
2504 ts->stat = SAS_OPEN_REJECT;
2505 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2506 break;
2507 default:
2508 PM8001_IO_DBG(pm8001_ha,
2509 pm8001_printk("Unknown status 0x%x\n", status));
2510 ts->resp = SAS_TASK_COMPLETE;
2511 ts->stat = SAS_DEV_NO_RESPONSE;
2512 /* not allowed case. Therefore, return failed status */
2513 break;
2514 }
2515 spin_lock_irqsave(&t->task_state_lock, flags);
2516 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2517 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2518 t->task_state_flags |= SAS_TASK_STATE_DONE;
2519 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2520 spin_unlock_irqrestore(&t->task_state_lock, flags);
2521 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
2522 "task 0x%p done with io_status 0x%x resp 0x%x"
2523 "stat 0x%x but aborted by upper layer!\n",
2524 t, status, ts->resp, ts->stat));
2525 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2526 } else {
2527 spin_unlock_irqrestore(&t->task_state_lock, flags);
2528 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2529 mb();/* in order to force CPU ordering */
2530 t->task_done(t);
2531 }
2532}
2533
/**
 * pm80xx_hw_event_ack_req - For PM8001, some events need to be acknowledged
 * to the FW.
 * @pm8001_ha: our hba card information
 * @Qnum: the inbound queue number the ack is posted on (indexes inbnd_q_tbl)
 * @SEA: source of event to ack
 * @port_id: port id.
 * @phyId: phy id.
 * @param0: parameter 0.
 * @param1: parameter 1.
 */
static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
	u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
{
	struct hw_event_ack_req payload;
	u32 opc = OPC_INB_SAS_HW_EVENT_ACK;

	struct inbound_queue_table *circularQ;

	memset((u8 *)&payload, 0, sizeof(payload));
	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
	/* Fixed tag: no per-command completion is tracked for this ack */
	payload.tag = cpu_to_le32(1);
	/* Pack phy id [31:24], event source [23:8], port id [7:0] */
	payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
		((phyId & 0xFF) << 24) | (port_id & 0xFF));
	payload.param0 = cpu_to_le32(param0);
	payload.param1 = cpu_to_le32(param1);
	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
}
2561
2562static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
2563 u32 phyId, u32 phy_op);
2564
/**
 * hw_event_sas_phy_up - FW tells me a SAS phy up event.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Decodes the HW event payload, records link rate/port state, notifies
 * libsas of OOB completion and hands over the received IDENTIFY frame.
 */
static void
hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_status_evt_portid =
		le32_to_cpu(pPayload->lr_status_evt_portid);
	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);

	/* Bitfield decode: link rate [31:28], port id [7:0] of the first
	 * dword; phy id [23:16] and port state [3:0] of the second. */
	u8 link_rate =
		(u8)((lr_status_evt_portid & 0xF0000000) >> 28);
	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
	u8 phy_id =
		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);

	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	unsigned long flags;
	u8 deviceType = pPayload->sas_identify.dev_type;
	port->port_state = portstate;
	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
		"portid:%d; phyid:%d; linkrate:%d; "
		"portstate:%x; devicetype:%x\n",
		port_id, phy_id, link_rate, portstate, deviceType));

	switch (deviceType) {
	case SAS_PHY_UNUSED:
		/* Nothing attached on this phy; leave port_attached clear */
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("device type no device.\n"));
		break;
	case SAS_END_DEVICE:
		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
		/* Allow the attached disk to spin up */
		pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
			PHY_NOTIFY_ENABLE_SPINUP);
		port->port_attached = 1;
		pm8001_get_lrate_mode(phy, link_rate);
		break;
	case SAS_EDGE_EXPANDER_DEVICE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("expander device.\n"));
		port->port_attached = 1;
		pm8001_get_lrate_mode(phy, link_rate);
		break;
	case SAS_FANOUT_EXPANDER_DEVICE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("fanout expander device.\n"));
		port->port_attached = 1;
		pm8001_get_lrate_mode(phy, link_rate);
		break;
	default:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("unknown device type(%x)\n", deviceType));
		break;
	}
	phy->phy_type |= PORT_TYPE_SAS;
	phy->identify.device_type = deviceType;
	phy->phy_attached = 1;
	if (phy->identify.device_type == SAS_END_DEVICE)
		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
	else if (phy->identify.device_type != SAS_PHY_UNUSED)
		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
	phy->sas_phy.oob_mode = SAS_OOB_MODE;
	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
	/* Publish the IDENTIFY frame to libsas under the frame_rcvd lock;
	 * the trailing 4 bytes (CRC) are not copied. */
	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
		sizeof(struct sas_identify_frame)-4);
	phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
	if (pm8001_ha->flags == PM8001F_RUN_TIME)
		mdelay(200);/*delay a moment to wait disk to spinup*/
	pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
2645
/**
 * hw_event_sata_phy_up - FW tells me a SATA phy up event.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Decodes the HW event payload, marks the port/phy attached as SATA,
 * notifies libsas of OOB completion and hands over the received D2H FIS.
 */
static void
hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
	u32 lr_status_evt_portid =
		le32_to_cpu(pPayload->lr_status_evt_portid);
	/* Bitfield decode: link rate [31:28], port id [7:0]; phy id [23:16],
	 * port state [3:0]. */
	u8 link_rate =
		(u8)((lr_status_evt_portid & 0xF0000000) >> 28);
	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
	u8 phy_id =
		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);

	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);

	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	unsigned long flags;
	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
		"port id %d, phy id %d link_rate %d portstate 0x%x\n",
		port_id, phy_id, link_rate, portstate));

	port->port_state = portstate;
	port->port_attached = 1;
	pm8001_get_lrate_mode(phy, link_rate);
	phy->phy_type |= PORT_TYPE_SATA;
	phy->phy_attached = 1;
	phy->sas_phy.oob_mode = SATA_OOB_MODE;
	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
	/* Copy the D2H FIS to libsas; the source starts 4 bytes before the
	 * sata_fis field — presumably the FIS header sits there in this
	 * payload layout (NOTE(review): confirm against the IOMB spec). */
	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
		sizeof(struct dev_to_host_fis));
	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	phy->identify.device_type = SAS_SATA_DEV;
	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
	pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
2692
/**
 * hw_event_phy_down - we should notify the libsas the phy is down.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Clears the local phy bookkeeping, then acts on the reported port state;
 * PORT_INVALID and PORT_LOSTCOMM additionally require an ack to the FW.
 */
static void
hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);

	u32 lr_status_evt_portid =
		le32_to_cpu(pPayload->lr_status_evt_portid);
	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
	u8 phy_id =
		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);

	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	port->port_state = portstate;
	phy->phy_type = 0;
	phy->identify.device_type = 0;
	phy->phy_attached = 0;
	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
	switch (portstate) {
	case PORT_VALID:
		/* Other phys still keep the port up; nothing to do */
		break;
	case PORT_INVALID:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" PortInvalid portID %d\n", port_id));
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" Last phy Down and port invalid\n"));
		port->port_attached = 0;
		/* FW expects an ack for this transition */
		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
			port_id, phy_id, 0, 0);
		break;
	case PORT_IN_RESET:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" Port In Reset portID %d\n", port_id));
		break;
	case PORT_NOT_ESTABLISHED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
		port->port_attached = 0;
		break;
	case PORT_LOSTCOMM:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" Last phy Down and port invalid\n"));
		port->port_attached = 0;
		/* FW expects an ack for this transition */
		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
			port_id, phy_id, 0, 0);
		break;
	default:
		port->port_attached = 0;
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" phy Down and(default) = 0x%x\n",
			portstate));
		break;

	}
}
2758
2759static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
2760{
2761 struct phy_start_resp *pPayload =
2762 (struct phy_start_resp *)(piomb + 4);
2763 u32 status =
2764 le32_to_cpu(pPayload->status);
2765 u32 phy_id =
2766 le32_to_cpu(pPayload->phyid);
2767 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2768
2769 PM8001_INIT_DBG(pm8001_ha,
2770 pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
2771 status, phy_id));
2772 if (status == 0) {
2773 phy->phy_state = 1;
2774 if (pm8001_ha->flags == PM8001F_RUN_TIME)
2775 complete(phy->enable_completion);
2776 }
2777 return 0;
2778
2779}
2780
2781/**
2782 * mpi_thermal_hw_event -The hw event has come.
2783 * @pm8001_ha: our hba card information
2784 * @piomb: IO message buffer
2785 */
2786static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
2787{
2788 struct thermal_hw_event *pPayload =
2789 (struct thermal_hw_event *)(piomb + 4);
2790
2791 u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
2792 u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
2793
2794 if (thermal_event & 0x40) {
2795 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2796 "Thermal Event: Local high temperature violated!\n"));
2797 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2798 "Thermal Event: Measured local high temperature %d\n",
2799 ((rht_lht & 0xFF00) >> 8)));
2800 }
2801 if (thermal_event & 0x10) {
2802 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2803 "Thermal Event: Remote high temperature violated!\n"));
2804 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
2805 "Thermal Event: Measured remote high temperature %d\n",
2806 ((rht_lht & 0xFF000000) >> 24)));
2807 }
2808 return 0;
2809}
2810
/**
 * mpi_hw_event - The hw event has come.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Central dispatcher for OPC_OUB_HW_EVENT: decodes the event type and
 * forwards phy/port state changes to libsas; several link-error events
 * additionally require an ack back to the FW.
 */
static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	unsigned long flags;
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_status_evt_portid =
		le32_to_cpu(pPayload->lr_status_evt_portid);
	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
	/* Bitfield decode: port id [7:0], event type [23:8],
	 * status [27:24]; phy id lives in bits [23:16] of the second dword. */
	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
	u8 phy_id =
		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
	u16 eventType =
		(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
	u8 status =
		(u8)((lr_status_evt_portid & 0x0F000000) >> 24);

	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
			port_id, phy_id, eventType, status));

	switch (eventType) {

	case HW_EVENT_SAS_PHY_UP:
		/* NOTE(review): debug string says PHY_START_STATUS but this
		 * is the SAS_PHY_UP event. */
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
		hw_event_sas_phy_up(pm8001_ha, piomb);
		break;
	case HW_EVENT_SATA_PHY_UP:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
		hw_event_sata_phy_up(pm8001_ha, piomb);
		break;
	case HW_EVENT_SATA_SPINUP_HOLD:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
		break;
	case HW_EVENT_PHY_DOWN:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PHY_DOWN\n"));
		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
		phy->phy_attached = 0;
		phy->phy_state = 0;
		hw_event_phy_down(pm8001_ha, piomb);
		break;
	case HW_EVENT_PORT_INVALID:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PORT_INVALID\n"));
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	/* the broadcast change primitive received, tell the LIBSAS this event
	to revalidate the sas domain*/
	case HW_EVENT_BROADCAST_CHANGE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
			port_id, phy_id, 1, 0);
		/* Record the primitive under sas_prim_lock for libsas */
		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
		sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
		break;
	case HW_EVENT_PHY_ERROR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PHY_ERROR\n"));
		sas_phy_disconnected(&phy->sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
		break;
	case HW_EVENT_BROADCAST_EXP:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
		sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
		break;
	/* The following link-error events ack the FW, then report a link
	 * reset error to libsas. */
	case HW_EVENT_LINK_ERR_INVALID_DWORD:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_DISPARITY_ERROR,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_CODE_VIOLATION,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
			"HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_MALFUNCTION:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_MALFUNCTION\n"));
		break;
	case HW_EVENT_BROADCAST_SES:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
		sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
		break;
	case HW_EVENT_INBOUND_CRC_ERROR:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_INBOUND_CRC_ERROR,
			port_id, phy_id, 0, 0);
		break;
	case HW_EVENT_HARD_RESET_RECEIVED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
		sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
		break;
	case HW_EVENT_ID_FRAME_TIMEOUT:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_PORT_RESET_TIMER_TMO:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
		pm80xx_hw_event_ack_req(pm8001_ha, 0,
			HW_EVENT_PORT_RECOVERY_TIMER_TMO,
			port_id, phy_id, 0, 0);
		sas_phy_disconnected(sas_phy);
		phy->phy_attached = 0;
		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
		break;
	case HW_EVENT_PORT_RECOVER:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
		break;
	case HW_EVENT_PORT_RESET_COMPLETE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
		break;
	case EVENT_BROADCAST_ASYNCH_EVENT:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
		break;
	default:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("Unknown event type 0x%x\n", eventType));
		break;
	}
	return 0;
}
3014
3015/**
3016 * mpi_phy_stop_resp - SPCv specific
3017 * @pm8001_ha: our hba card information
3018 * @piomb: IO message buffer
3019 */
3020static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3021{
3022 struct phy_stop_resp *pPayload =
3023 (struct phy_stop_resp *)(piomb + 4);
3024 u32 status =
3025 le32_to_cpu(pPayload->status);
3026 u32 phyid =
3027 le32_to_cpu(pPayload->phyid);
3028 struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
3029 PM8001_MSG_DBG(pm8001_ha,
3030 pm8001_printk("phy:0x%x status:0x%x\n",
3031 phyid, status));
3032 if (status == 0)
3033 phy->phy_state = 0;
3034 return 0;
3035}
3036
3037/**
3038 * mpi_set_controller_config_resp - SPCv specific
3039 * @pm8001_ha: our hba card information
3040 * @piomb: IO message buffer
3041 */
3042static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
3043 void *piomb)
3044{
3045 struct set_ctrl_cfg_resp *pPayload =
3046 (struct set_ctrl_cfg_resp *)(piomb + 4);
3047 u32 status = le32_to_cpu(pPayload->status);
3048 u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
3049
3050 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3051 "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
3052 status, err_qlfr_pgcd));
3053
3054 return 0;
3055}
3056
/**
 * mpi_get_controller_config_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the response payload is not decoded yet, only a
 * debug message is emitted.
 */
static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
	void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3070
/**
 * mpi_get_phy_profile_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the response payload is not decoded yet, only a
 * debug message is emitted.
 */
static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
	void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3084
/**
 * mpi_flash_op_ext_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the response payload is not decoded yet, only a
 * debug message is emitted.
 */
static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3097
/**
 * mpi_set_phy_profile_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the response payload is not decoded yet, only a
 * debug message is emitted.
 */
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
	void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3111
3112/**
3113 * mpi_kek_management_resp - SPCv specific
3114 * @pm8001_ha: our hba card information
3115 * @piomb: IO message buffer
3116 */
3117static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
3118 void *piomb)
3119{
3120 struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);
3121
3122 u32 status = le32_to_cpu(pPayload->status);
3123 u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
3124 u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
3125
3126 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3127 "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
3128 status, kidx_new_curr_ksop, err_qlfr));
3129
3130 return 0;
3131}
3132
/**
 * mpi_dek_management_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the response payload is not decoded yet, only a
 * debug message is emitted.
 */
static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
	void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3146
/**
 * ssp_coalesced_comp_resp - SPCv specific
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * Placeholder handler: the response payload is not decoded yet, only a
 * debug message is emitted.
 */
static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
	void *piomb)
{
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(" pm80xx_addition_functionality\n"));

	return 0;
}
3160
3161/**
3162 * process_one_iomb - process one outbound Queue memory block
3163 * @pm8001_ha: our hba card information
3164 * @piomb: IO message buffer
3165 */
3166static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3167{
3168 __le32 pHeader = *(__le32 *)piomb;
3169 u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
3170
3171 switch (opc) {
3172 case OPC_OUB_ECHO:
3173 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
3174 break;
3175 case OPC_OUB_HW_EVENT:
3176 PM8001_MSG_DBG(pm8001_ha,
3177 pm8001_printk("OPC_OUB_HW_EVENT\n"));
3178 mpi_hw_event(pm8001_ha, piomb);
3179 break;
3180 case OPC_OUB_THERM_HW_EVENT:
3181 PM8001_MSG_DBG(pm8001_ha,
3182 pm8001_printk("OPC_OUB_THERMAL_EVENT\n"));
3183 mpi_thermal_hw_event(pm8001_ha, piomb);
3184 break;
3185 case OPC_OUB_SSP_COMP:
3186 PM8001_MSG_DBG(pm8001_ha,
3187 pm8001_printk("OPC_OUB_SSP_COMP\n"));
3188 mpi_ssp_completion(pm8001_ha, piomb);
3189 break;
3190 case OPC_OUB_SMP_COMP:
3191 PM8001_MSG_DBG(pm8001_ha,
3192 pm8001_printk("OPC_OUB_SMP_COMP\n"));
3193 mpi_smp_completion(pm8001_ha, piomb);
3194 break;
3195 case OPC_OUB_LOCAL_PHY_CNTRL:
3196 PM8001_MSG_DBG(pm8001_ha,
3197 pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
3198 pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
3199 break;
3200 case OPC_OUB_DEV_REGIST:
3201 PM8001_MSG_DBG(pm8001_ha,
3202 pm8001_printk("OPC_OUB_DEV_REGIST\n"));
3203 pm8001_mpi_reg_resp(pm8001_ha, piomb);
3204 break;
3205 case OPC_OUB_DEREG_DEV:
3206 PM8001_MSG_DBG(pm8001_ha,
3207 pm8001_printk("unresgister the deviece\n"));
3208 pm8001_mpi_dereg_resp(pm8001_ha, piomb);
3209 break;
3210 case OPC_OUB_GET_DEV_HANDLE:
3211 PM8001_MSG_DBG(pm8001_ha,
3212 pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
3213 break;
3214 case OPC_OUB_SATA_COMP:
3215 PM8001_MSG_DBG(pm8001_ha,
3216 pm8001_printk("OPC_OUB_SATA_COMP\n"));
3217 mpi_sata_completion(pm8001_ha, piomb);
3218 break;
3219 case OPC_OUB_SATA_EVENT:
3220 PM8001_MSG_DBG(pm8001_ha,
3221 pm8001_printk("OPC_OUB_SATA_EVENT\n"));
3222 mpi_sata_event(pm8001_ha, piomb);
3223 break;
3224 case OPC_OUB_SSP_EVENT:
3225 PM8001_MSG_DBG(pm8001_ha,
3226 pm8001_printk("OPC_OUB_SSP_EVENT\n"));
3227 mpi_ssp_event(pm8001_ha, piomb);
3228 break;
3229 case OPC_OUB_DEV_HANDLE_ARRIV:
3230 PM8001_MSG_DBG(pm8001_ha,
3231 pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
3232 /*This is for target*/
3233 break;
3234 case OPC_OUB_SSP_RECV_EVENT:
3235 PM8001_MSG_DBG(pm8001_ha,
3236 pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
3237 /*This is for target*/
3238 break;
3239 case OPC_OUB_FW_FLASH_UPDATE:
3240 PM8001_MSG_DBG(pm8001_ha,
3241 pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
3242 pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
3243 break;
3244 case OPC_OUB_GPIO_RESPONSE:
3245 PM8001_MSG_DBG(pm8001_ha,
3246 pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
3247 break;
3248 case OPC_OUB_GPIO_EVENT:
3249 PM8001_MSG_DBG(pm8001_ha,
3250 pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
3251 break;
3252 case OPC_OUB_GENERAL_EVENT:
3253 PM8001_MSG_DBG(pm8001_ha,
3254 pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
3255 pm8001_mpi_general_event(pm8001_ha, piomb);
3256 break;
3257 case OPC_OUB_SSP_ABORT_RSP:
3258 PM8001_MSG_DBG(pm8001_ha,
3259 pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
3260 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3261 break;
3262 case OPC_OUB_SATA_ABORT_RSP:
3263 PM8001_MSG_DBG(pm8001_ha,
3264 pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
3265 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3266 break;
3267 case OPC_OUB_SAS_DIAG_MODE_START_END:
3268 PM8001_MSG_DBG(pm8001_ha,
3269 pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
3270 break;
3271 case OPC_OUB_SAS_DIAG_EXECUTE:
3272 PM8001_MSG_DBG(pm8001_ha,
3273 pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
3274 break;
3275 case OPC_OUB_GET_TIME_STAMP:
3276 PM8001_MSG_DBG(pm8001_ha,
3277 pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
3278 break;
3279 case OPC_OUB_SAS_HW_EVENT_ACK:
3280 PM8001_MSG_DBG(pm8001_ha,
3281 pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
3282 break;
3283 case OPC_OUB_PORT_CONTROL:
3284 PM8001_MSG_DBG(pm8001_ha,
3285 pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
3286 break;
3287 case OPC_OUB_SMP_ABORT_RSP:
3288 PM8001_MSG_DBG(pm8001_ha,
3289 pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
3290 pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
3291 break;
3292 case OPC_OUB_GET_NVMD_DATA:
3293 PM8001_MSG_DBG(pm8001_ha,
3294 pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
3295 pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
3296 break;
3297 case OPC_OUB_SET_NVMD_DATA:
3298 PM8001_MSG_DBG(pm8001_ha,
3299 pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
3300 pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
3301 break;
3302 case OPC_OUB_DEVICE_HANDLE_REMOVAL:
3303 PM8001_MSG_DBG(pm8001_ha,
3304 pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
3305 break;
3306 case OPC_OUB_SET_DEVICE_STATE:
3307 PM8001_MSG_DBG(pm8001_ha,
3308 pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
3309 pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
3310 break;
3311 case OPC_OUB_GET_DEVICE_STATE:
3312 PM8001_MSG_DBG(pm8001_ha,
3313 pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
3314 break;
3315 case OPC_OUB_SET_DEV_INFO:
3316 PM8001_MSG_DBG(pm8001_ha,
3317 pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
3318 break;
3319 /* spcv specifc commands */
3320 case OPC_OUB_PHY_START_RESP:
3321 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3322 "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
3323 mpi_phy_start_resp(pm8001_ha, piomb);
3324 break;
3325 case OPC_OUB_PHY_STOP_RESP:
3326 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3327 "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
3328 mpi_phy_stop_resp(pm8001_ha, piomb);
3329 break;
3330 case OPC_OUB_SET_CONTROLLER_CONFIG:
3331 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3332 "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
3333 mpi_set_controller_config_resp(pm8001_ha, piomb);
3334 break;
3335 case OPC_OUB_GET_CONTROLLER_CONFIG:
3336 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3337 "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
3338 mpi_get_controller_config_resp(pm8001_ha, piomb);
3339 break;
3340 case OPC_OUB_GET_PHY_PROFILE:
3341 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3342 "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
3343 mpi_get_phy_profile_resp(pm8001_ha, piomb);
3344 break;
3345 case OPC_OUB_FLASH_OP_EXT:
3346 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3347 "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
3348 mpi_flash_op_ext_resp(pm8001_ha, piomb);
3349 break;
3350 case OPC_OUB_SET_PHY_PROFILE:
3351 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3352 "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
3353 mpi_set_phy_profile_resp(pm8001_ha, piomb);
3354 break;
3355 case OPC_OUB_KEK_MANAGEMENT_RESP:
3356 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3357 "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
3358 mpi_kek_management_resp(pm8001_ha, piomb);
3359 break;
3360 case OPC_OUB_DEK_MANAGEMENT_RESP:
3361 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3362 "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
3363 mpi_dek_management_resp(pm8001_ha, piomb);
3364 break;
3365 case OPC_OUB_SSP_COALESCED_COMP_RESP:
3366 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3367 "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
3368 ssp_coalesced_comp_resp(pm8001_ha, piomb);
3369 break;
3370 default:
3371 PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
3372 "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
3373 break;
3374 }
3375}
3376
/*
 * process_oq - drain one outbound queue (interrupt vector @vec).
 * Consumes IOMBs until the queue is empty, dispatching each to
 * process_one_iomb() and then returning it to the ring.
 * Runs under pm8001_ha->lock with interrupts disabled.
 */
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
	struct outbound_queue_table *circularQ;
	void *pMsg1 = NULL;
	u8 uninitialized_var(bc);	/* buffer count, set by msg_consume */
	u32 ret = MPI_IO_STATUS_FAIL;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->lock, flags);
	circularQ = &pm8001_ha->outbnd_q_tbl[vec];
	do {
		ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
		if (MPI_IO_STATUS_SUCCESS == ret) {
			/* process the outbound message */
			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
			/* free the message from the outbound circular buffer */
			pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
							circularQ, bc);
		}
		if (MPI_IO_STATUS_BUSY == ret) {
			/* Update the producer index from SPC */
			circularQ->producer_index =
				cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
			if (le32_to_cpu(circularQ->producer_index) ==
				circularQ->consumer_idx)
				/* OQ is empty */
				break;
		}
	} while (1);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return ret;
}
3409
/* PCI_DMA_... to our direction translation: maps kernel DMA direction
 * constants (used as indices) to the controller's DATA_DIR_* encoding. */
static const u8 data_dir_flags[] = {
	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
};
3417
3418static void build_smp_cmd(u32 deviceID, __le32 hTag,
3419 struct smp_req *psmp_cmd, int mode, int length)
3420{
3421 psmp_cmd->tag = hTag;
3422 psmp_cmd->device_id = cpu_to_le32(deviceID);
3423 if (mode == SMP_DIRECT) {
3424 length = length - 4; /* subtract crc */
3425 psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
3426 } else {
3427 psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
3428 }
3429}
3430
3431/**
3432 * pm8001_chip_smp_req - send a SMP task to FW
3433 * @pm8001_ha: our hba card information.
3434 * @ccb: the ccb information this request used.
3435 */
3436static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
3437 struct pm8001_ccb_info *ccb)
3438{
3439 int elem, rc;
3440 struct sas_task *task = ccb->task;
3441 struct domain_device *dev = task->dev;
3442 struct pm8001_device *pm8001_dev = dev->lldd_dev;
3443 struct scatterlist *sg_req, *sg_resp;
3444 u32 req_len, resp_len;
3445 struct smp_req smp_cmd;
3446 u32 opc;
3447 struct inbound_queue_table *circularQ;
3448 char *preq_dma_addr = NULL;
3449 __le64 tmp_addr;
3450 u32 i, length;
3451
3452 memset(&smp_cmd, 0, sizeof(smp_cmd));
3453 /*
3454 * DMA-map SMP request, response buffers
3455 */
3456 sg_req = &task->smp_task.smp_req;
3457 elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
3458 if (!elem)
3459 return -ENOMEM;
3460 req_len = sg_dma_len(sg_req);
3461
3462 sg_resp = &task->smp_task.smp_resp;
3463 elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
3464 if (!elem) {
3465 rc = -ENOMEM;
3466 goto err_out;
3467 }
3468 resp_len = sg_dma_len(sg_resp);
3469 /* must be in dwords */
3470 if ((req_len & 0x3) || (resp_len & 0x3)) {
3471 rc = -EINVAL;
3472 goto err_out_2;
3473 }
3474
3475 opc = OPC_INB_SMP_REQUEST;
3476 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3477 smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
3478
3479 length = sg_req->length;
3480 PM8001_IO_DBG(pm8001_ha,
3481 pm8001_printk("SMP Frame Length %d\n", sg_req->length));
3482 if (!(length - 8))
3483 pm8001_ha->smp_exp_mode = SMP_DIRECT;
3484 else
3485 pm8001_ha->smp_exp_mode = SMP_INDIRECT;
3486
3487 /* DIRECT MODE support only in spcv/ve */
3488 pm8001_ha->smp_exp_mode = SMP_DIRECT;
3489
3490 tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
3491 preq_dma_addr = (char *)phys_to_virt(tmp_addr);
3492
3493 /* INDIRECT MODE command settings. Use DMA */
3494 if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
3495 PM8001_IO_DBG(pm8001_ha,
3496 pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
3497 /* for SPCv indirect mode. Place the top 4 bytes of
3498 * SMP Request header here. */
3499 for (i = 0; i < 4; i++)
3500 smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
3501 /* exclude top 4 bytes for SMP req header */
3502 smp_cmd.long_smp_req.long_req_addr =
3503 cpu_to_le64((u64)sg_dma_address
3504 (&task->smp_task.smp_req) - 4);
3505 /* exclude 4 bytes for SMP req header and CRC */
3506 smp_cmd.long_smp_req.long_req_size =
3507 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
3508 smp_cmd.long_smp_req.long_resp_addr =
3509 cpu_to_le64((u64)sg_dma_address
3510 (&task->smp_task.smp_resp));
3511 smp_cmd.long_smp_req.long_resp_size =
3512 cpu_to_le32((u32)sg_dma_len
3513 (&task->smp_task.smp_resp)-4);
3514 } else { /* DIRECT MODE */
3515 smp_cmd.long_smp_req.long_req_addr =
3516 cpu_to_le64((u64)sg_dma_address
3517 (&task->smp_task.smp_req));
3518 smp_cmd.long_smp_req.long_req_size =
3519 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
3520 smp_cmd.long_smp_req.long_resp_addr =
3521 cpu_to_le64((u64)sg_dma_address
3522 (&task->smp_task.smp_resp));
3523 smp_cmd.long_smp_req.long_resp_size =
3524 cpu_to_le32
3525 ((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
3526 }
3527 if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
3528 PM8001_IO_DBG(pm8001_ha,
3529 pm8001_printk("SMP REQUEST DIRECT MODE\n"));
3530 for (i = 0; i < length; i++)
3531 if (i < 16) {
3532 smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
3533 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
3534 "Byte[%d]:%x (DMA data:%x)\n",
3535 i, smp_cmd.smp_req16[i],
3536 *(preq_dma_addr)));
3537 } else {
3538 smp_cmd.smp_req[i] = *(preq_dma_addr+i);
3539 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
3540 "Byte[%d]:%x (DMA data:%x)\n",
3541 i, smp_cmd.smp_req[i],
3542 *(preq_dma_addr)));
3543 }
3544 }
3545
3546 build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
3547 &smp_cmd, pm8001_ha->smp_exp_mode, length);
3548 pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
3549 return 0;
3550
3551err_out_2:
3552 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
3553 PCI_DMA_FROMDEVICE);
3554err_out:
3555 dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
3556 PCI_DMA_TODEVICE);
3557 return rc;
3558}
3559
3560static int check_enc_sas_cmd(struct sas_task *task)
3561{
3562 if ((task->ssp_task.cdb[0] == READ_10)
3563 || (task->ssp_task.cdb[0] == WRITE_10)
3564 || (task->ssp_task.cdb[0] == WRITE_VERIFY))
3565 return 1;
3566 else
3567 return 0;
3568}
3569
3570static int check_enc_sat_cmd(struct sas_task *task)
3571{
3572 int ret = 0;
3573 switch (task->ata_task.fis.command) {
3574 case ATA_CMD_FPDMA_READ:
3575 case ATA_CMD_READ_EXT:
3576 case ATA_CMD_READ:
3577 case ATA_CMD_FPDMA_WRITE:
3578 case ATA_CMD_WRITE_EXT:
3579 case ATA_CMD_WRITE:
3580 case ATA_CMD_PIO_READ:
3581 case ATA_CMD_PIO_READ_EXT:
3582 case ATA_CMD_PIO_WRITE:
3583 case ATA_CMD_PIO_WRITE_EXT:
3584 ret = 1;
3585 break;
3586 default:
3587 ret = 0;
3588 break;
3589 }
3590 return ret;
3591}
3592
/**
 * pm80xx_chip_ssp_io_req - send a SSP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 *
 * Builds an SSP_INI_IO_START IOMB (or the encrypted DIF variant when
 * the chip supports encryption and the command qualifies) and posts it
 * on inbound queue 0.  Returns the result of pm8001_mpi_build_cmd().
 */
static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct ssp_ini_io_start_req ssp_cmd;
	u32 tag = ccb->ccb_tag;
	int ret;
	u64 phys_addr;
	struct inbound_queue_table *circularQ;
	/* NOTE(review): these are shared across all HBA instances and
	 * updated without locking; `inb` is never incremented, so the
	 * debug print below always logs 0 — confirm intent. */
	static u32 inb;
	static u32 outb;
	u32 opc = OPC_INB_SSPINIIOSTART;
	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
	/* data address domain added for spcv; set to 0 by host,
	 * used internally by controller
	 * 0 for SAS 1.1 and SAS 2.0 compatible TLR
	 */
	ssp_cmd.dad_dir_m_tlr =
		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
	ssp_cmd.tag = cpu_to_le32(tag);
	if (task->ssp_task.enable_first_burst)
		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
	/* pack first-burst flag, task priority and task attribute into
	 * the single efb_prio_attr byte of the SSP IU */
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	/* Check if encryption is set */
	if (pm8001_ha->chip->encrypt &&
		!(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"Encryption enabled.Sending Encrypt SAS command 0x%x\n",
			task->ssp_task.cdb[0]));
		opc = OPC_INB_SSP_INI_DIF_ENC_IO;
		/* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
		ssp_cmd.dad_dir_m_tlr = cpu_to_le32
			((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);

		/* fill in PRD (scatter/gather) table, if any */
		if (task->num_scatter > 1) {
			/* multiple segments: build an extended SGL inside
			 * the ccb and point the IOMB at it */
			pm8001_chip_make_sg(task->scatter,
					ccb->n_elem, ccb->buf_prd);
			phys_addr = ccb->ccb_dma_handle +
				offsetof(struct pm8001_ccb_info, buf_prd[0]);
			ssp_cmd.enc_addr_low =
				cpu_to_le32(lower_32_bits(phys_addr));
			ssp_cmd.enc_addr_high =
				cpu_to_le32(upper_32_bits(phys_addr));
			ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
		} else if (task->num_scatter == 1) {
			/* single segment: direct DMA address, no ESGL */
			u64 dma_addr = sg_dma_address(task->scatter);
			ssp_cmd.enc_addr_low =
				cpu_to_le32(lower_32_bits(dma_addr));
			ssp_cmd.enc_addr_high =
				cpu_to_le32(upper_32_bits(dma_addr));
			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.enc_esgl = 0;
		} else if (task->num_scatter == 0) {
			/* no data buffer */
			ssp_cmd.enc_addr_low = 0;
			ssp_cmd.enc_addr_high = 0;
			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.enc_esgl = 0;
		}
		/* XTS mode. All other fields are 0 */
		ssp_cmd.key_cmode = 0x6 << 4;
		/* set tweak values. Should be the start lba */
		ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
						(task->ssp_task.cdb[3] << 16) |
						(task->ssp_task.cdb[4] << 8) |
						(task->ssp_task.cdb[5]));
	} else {
		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
			"Sending Normal SAS command 0x%x inb q %x\n",
			task->ssp_task.cdb[0], inb));
		/* fill in PRD (scatter/gather) table, if any */
		if (task->num_scatter > 1) {
			pm8001_chip_make_sg(task->scatter, ccb->n_elem,
					ccb->buf_prd);
			phys_addr = ccb->ccb_dma_handle +
				offsetof(struct pm8001_ccb_info, buf_prd[0]);
			ssp_cmd.addr_low =
				cpu_to_le32(lower_32_bits(phys_addr));
			ssp_cmd.addr_high =
				cpu_to_le32(upper_32_bits(phys_addr));
			ssp_cmd.esgl = cpu_to_le32(1<<31);
		} else if (task->num_scatter == 1) {
			u64 dma_addr = sg_dma_address(task->scatter);
			ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
			ssp_cmd.addr_high =
				cpu_to_le32(upper_32_bits(dma_addr));
			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.esgl = 0;
		} else if (task->num_scatter == 0) {
			ssp_cmd.addr_low = 0;
			ssp_cmd.addr_high = 0;
			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
			ssp_cmd.esgl = 0;
		}
	}
	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);

	/* rotate the outb queue */
	outb = outb%PM8001_MAX_SPCV_OUTB_NUM;

	return ret;
}
3709
3710static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3711 struct pm8001_ccb_info *ccb)
3712{
3713 struct sas_task *task = ccb->task;
3714 struct domain_device *dev = task->dev;
3715 struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
3716 u32 tag = ccb->ccb_tag;
3717 int ret;
3718 static u32 inb;
3719 static u32 outb;
3720 struct sata_start_req sata_cmd;
3721 u32 hdr_tag, ncg_tag = 0;
3722 u64 phys_addr;
3723 u32 ATAP = 0x0;
3724 u32 dir;
3725 struct inbound_queue_table *circularQ;
3726 unsigned long flags;
3727 u32 opc = OPC_INB_SATA_HOST_OPSTART;
3728 memset(&sata_cmd, 0, sizeof(sata_cmd));
3729 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3730
3731 if (task->data_dir == PCI_DMA_NONE) {
3732 ATAP = 0x04; /* no data*/
3733 PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
3734 } else if (likely(!task->ata_task.device_control_reg_update)) {
3735 if (task->ata_task.dma_xfer) {
3736 ATAP = 0x06; /* DMA */
3737 PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
3738 } else {
3739 ATAP = 0x05; /* PIO*/
3740 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
3741 }
3742 if (task->ata_task.use_ncq &&
3743 dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
3744 ATAP = 0x07; /* FPDMA */
3745 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
3746 }
3747 }
3748 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
3749 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
3750 ncg_tag = hdr_tag;
3751 }
3752 dir = data_dir_flags[task->data_dir] << 8;
3753 sata_cmd.tag = cpu_to_le32(tag);
3754 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
3755 sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
3756
3757 sata_cmd.sata_fis = task->ata_task.fis;
3758 if (likely(!task->ata_task.device_control_reg_update))
3759 sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
3760 sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
3761
3762 /* Check if encryption is set */
3763 if (pm8001_ha->chip->encrypt &&
3764 !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
3765 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
3766 "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
3767 sata_cmd.sata_fis.command));
3768 opc = OPC_INB_SATA_DIF_ENC_IO;
3769
3770 /* set encryption bit */
3771 sata_cmd.ncqtag_atap_dir_m_dad =
3772 cpu_to_le32(((ncg_tag & 0xff)<<16)|
3773 ((ATAP & 0x3f) << 10) | 0x20 | dir);
3774 /* dad (bit 0-1) is 0 */
3775 /* fill in PRD (scatter/gather) table, if any */
3776 if (task->num_scatter > 1) {
3777 pm8001_chip_make_sg(task->scatter,
3778 ccb->n_elem, ccb->buf_prd);
3779 phys_addr = ccb->ccb_dma_handle +
3780 offsetof(struct pm8001_ccb_info, buf_prd[0]);
3781 sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
3782 sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
3783 sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
3784 } else if (task->num_scatter == 1) {
3785 u64 dma_addr = sg_dma_address(task->scatter);
3786 sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
3787 sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
3788 sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
3789 sata_cmd.enc_esgl = 0;
3790 } else if (task->num_scatter == 0) {
3791 sata_cmd.enc_addr_low = 0;
3792 sata_cmd.enc_addr_high = 0;
3793 sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
3794 sata_cmd.enc_esgl = 0;
3795 }
3796 /* XTS mode. All other fields are 0 */
3797 sata_cmd.key_index_mode = 0x6 << 4;
3798 /* set tweak values. Should be the start lba */
3799 sata_cmd.twk_val0 =
3800 cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
3801 (sata_cmd.sata_fis.lbah << 16) |
3802 (sata_cmd.sata_fis.lbam << 8) |
3803 (sata_cmd.sata_fis.lbal));
3804 sata_cmd.twk_val1 =
3805 cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
3806 (sata_cmd.sata_fis.lbam_exp));
3807 } else {
3808 PM8001_IO_DBG(pm8001_ha, pm8001_printk(
3809 "Sending Normal SATA command 0x%x inb %x\n",
3810 sata_cmd.sata_fis.command, inb));
3811 /* dad (bit 0-1) is 0 */
3812 sata_cmd.ncqtag_atap_dir_m_dad =
3813 cpu_to_le32(((ncg_tag & 0xff)<<16) |
3814 ((ATAP & 0x3f) << 10) | dir);
3815
3816 /* fill in PRD (scatter/gather) table, if any */
3817 if (task->num_scatter > 1) {
3818 pm8001_chip_make_sg(task->scatter,
3819 ccb->n_elem, ccb->buf_prd);
3820 phys_addr = ccb->ccb_dma_handle +
3821 offsetof(struct pm8001_ccb_info, buf_prd[0]);
3822 sata_cmd.addr_low = lower_32_bits(phys_addr);
3823 sata_cmd.addr_high = upper_32_bits(phys_addr);
3824 sata_cmd.esgl = cpu_to_le32(1 << 31);
3825 } else if (task->num_scatter == 1) {
3826 u64 dma_addr = sg_dma_address(task->scatter);
3827 sata_cmd.addr_low = lower_32_bits(dma_addr);
3828 sata_cmd.addr_high = upper_32_bits(dma_addr);
3829 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
3830 sata_cmd.esgl = 0;
3831 } else if (task->num_scatter == 0) {
3832 sata_cmd.addr_low = 0;
3833 sata_cmd.addr_high = 0;
3834 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
3835 sata_cmd.esgl = 0;
3836 }
3837 /* scsi cdb */
3838 sata_cmd.atapi_scsi_cdb[0] =
3839 cpu_to_le32(((task->ata_task.atapi_packet[0]) |
3840 (task->ata_task.atapi_packet[1] << 8) |
3841 (task->ata_task.atapi_packet[2] << 16) |
3842 (task->ata_task.atapi_packet[3] << 24)));
3843 sata_cmd.atapi_scsi_cdb[1] =
3844 cpu_to_le32(((task->ata_task.atapi_packet[4]) |
3845 (task->ata_task.atapi_packet[5] << 8) |
3846 (task->ata_task.atapi_packet[6] << 16) |
3847 (task->ata_task.atapi_packet[7] << 24)));
3848 sata_cmd.atapi_scsi_cdb[2] =
3849 cpu_to_le32(((task->ata_task.atapi_packet[8]) |
3850 (task->ata_task.atapi_packet[9] << 8) |
3851 (task->ata_task.atapi_packet[10] << 16) |
3852 (task->ata_task.atapi_packet[11] << 24)));
3853 sata_cmd.atapi_scsi_cdb[3] =
3854 cpu_to_le32(((task->ata_task.atapi_packet[12]) |
3855 (task->ata_task.atapi_packet[13] << 8) |
3856 (task->ata_task.atapi_packet[14] << 16) |
3857 (task->ata_task.atapi_packet[15] << 24)));
3858 }
3859
3860 /* Check for read log for failed drive and return */
3861 if (sata_cmd.sata_fis.command == 0x2f) {
3862 if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
3863 (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
3864 (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
3865 struct task_status_struct *ts;
3866
3867 pm8001_ha_dev->id &= 0xDFFFFFFF;
3868 ts = &task->task_status;
3869
3870 spin_lock_irqsave(&task->task_state_lock, flags);
3871 ts->resp = SAS_TASK_COMPLETE;
3872 ts->stat = SAM_STAT_GOOD;
3873 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
3874 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
3875 task->task_state_flags |= SAS_TASK_STATE_DONE;
3876 if (unlikely((task->task_state_flags &
3877 SAS_TASK_STATE_ABORTED))) {
3878 spin_unlock_irqrestore(&task->task_state_lock,
3879 flags);
3880 PM8001_FAIL_DBG(pm8001_ha,
3881 pm8001_printk("task 0x%p resp 0x%x "
3882 " stat 0x%x but aborted by upper layer "
3883 "\n", task, ts->resp, ts->stat));
3884 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
3885 return 0;
3886 } else if (task->uldd_task) {
3887 spin_unlock_irqrestore(&task->task_state_lock,
3888 flags);
3889 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
3890 mb();/* ditto */
3891 spin_unlock_irq(&pm8001_ha->lock);
3892 task->task_done(task);
3893 spin_lock_irq(&pm8001_ha->lock);
3894 return 0;
3895 } else if (!task->uldd_task) {
3896 spin_unlock_irqrestore(&task->task_state_lock,
3897 flags);
3898 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
3899 mb();/*ditto*/
3900 spin_unlock_irq(&pm8001_ha->lock);
3901 task->task_done(task);
3902 spin_lock_irq(&pm8001_ha->lock);
3903 return 0;
3904 }
3905 }
3906 }
3907
3908 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
3909 &sata_cmd, outb++);
3910
3911 /* rotate the outb queue */
3912 outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
3913 return ret;
3914}
3915
3916/**
3917 * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
3918 * @pm8001_ha: our hba card information.
3919 * @num: the inbound queue number
3920 * @phy_id: the phy id which we wanted to start up.
3921 */
3922static int
3923pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
3924{
3925 struct phy_start_req payload;
3926 struct inbound_queue_table *circularQ;
3927 int ret;
3928 u32 tag = 0x01;
3929 u32 opcode = OPC_INB_PHYSTART;
3930 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3931 memset(&payload, 0, sizeof(payload));
3932 payload.tag = cpu_to_le32(tag);
3933
3934 PM8001_INIT_DBG(pm8001_ha,
3935 pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
3936 /*
3937 ** [0:7] PHY Identifier
3938 ** [8:11] link rate 1.5G, 3G, 6G
3939 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
3940 ** [14] 0b disable spin up hold; 1b enable spin up hold
3941 ** [15] ob no change in current PHY analig setup 1b enable using SPAST
3942 */
3943 payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
3944 LINKMODE_AUTO | LINKRATE_15 |
3945 LINKRATE_30 | LINKRATE_60 | phy_id);
3946 /* SSC Disable and SAS Analog ST configuration */
3947 /**
3948 payload.ase_sh_lm_slr_phyid =
3949 cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
3950 LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
3951 phy_id);
3952 Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
3953 **/
3954
3955 payload.sas_identify.dev_type = SAS_END_DEVICE;
3956 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
3957 memcpy(payload.sas_identify.sas_addr,
3958 pm8001_ha->sas_addr, SAS_ADDR_SIZE);
3959 payload.sas_identify.phy_id = phy_id;
3960 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
3961 return ret;
3962}
3963
3964/**
3965 * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND
3966 * @pm8001_ha: our hba card information.
3967 * @num: the inbound queue number
3968 * @phy_id: the phy id which we wanted to start up.
3969 */
3970static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
3971 u8 phy_id)
3972{
3973 struct phy_stop_req payload;
3974 struct inbound_queue_table *circularQ;
3975 int ret;
3976 u32 tag = 0x01;
3977 u32 opcode = OPC_INB_PHYSTOP;
3978 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3979 memset(&payload, 0, sizeof(payload));
3980 payload.tag = cpu_to_le32(tag);
3981 payload.phy_id = cpu_to_le32(phy_id);
3982 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
3983 return ret;
3984}
3985
/**
 * pm80xx_chip_reg_dev_req - register a discovered device with the FW
 * (see comments on pm8001_mpi_reg_resp for completion handling).
 * @pm8001_ha: our hba card information.
 * @pm8001_dev: the device to register.
 * @flag: 1 for a direct-attached SATA device, 0 otherwise.
 *
 * Allocates a tag/ccb, then posts an OPC_INB_REG_DEV IOMB carrying the
 * device type, link rate, port/phy ids and SAS address.
 */
static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, u32 flag)
{
	struct reg_dev_req payload;
	u32 opc;
	u32 stp_sspsmp_sata = 0x4;
	struct inbound_queue_table *circularQ;
	u32 linkrate, phy_id;
	int rc, tag = 0xdeadbeef;
	struct pm8001_ccb_info *ccb;
	u8 retryFlag = 0x1;
	u16 firstBurstSize = 0;
	u16 ITNT = 2000;
	struct domain_device *dev = pm8001_dev->sas_device;
	struct domain_device *parent_dev = dev->parent;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];

	memset(&payload, 0, sizeof(payload));
	rc = pm8001_tag_alloc(pm8001_ha, &tag);
	if (rc)
		return rc;
	ccb = &pm8001_ha->ccb_info[tag];
	ccb->device = pm8001_dev;
	ccb->ccb_tag = tag;
	payload.tag = cpu_to_le32(tag);

	/* encode the device protocol type for the firmware */
	if (flag == 1) {
		stp_sspsmp_sata = 0x02; /*direct attached sata */
	} else {
		if (pm8001_dev->dev_type == SAS_SATA_DEV)
			stp_sspsmp_sata = 0x00; /* stp*/
		else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
			pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
			pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
			stp_sspsmp_sata = 0x01; /*ssp or smp*/
	}
	/* behind an expander, register against the expander's phy */
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
	else
		phy_id = pm8001_dev->attached_phy;

	opc = OPC_INB_REG_DEV;

	/* use the slower of the device's and the port's negotiated rate */
	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
			pm8001_dev->sas_device->linkrate : dev->port->linkrate;

	payload.phyid_portid =
		cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
		((phy_id & 0xFF) << 8));

	payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
		((linkrate & 0x0F) << 24) |
		((stp_sspsmp_sata & 0x03) << 28));
	payload.firstburstsize_ITNexustimeout =
		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));

	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
		SAS_ADDR_SIZE);

	/* NOTE(review): if the post fails the allocated tag/ccb is not
	 * released here — confirm whether the caller cleans it up. */
	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);

	return rc;
}
4052
4053/**
4054 * pm80xx_chip_phy_ctl_req - support the local phy operation
4055 * @pm8001_ha: our hba card information.
4056 * @num: the inbound queue number
4057 * @phy_id: the phy id which we wanted to operate
4058 * @phy_op:
4059 */
4060static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4061 u32 phyId, u32 phy_op)
4062{
4063 struct local_phy_ctl_req payload;
4064 struct inbound_queue_table *circularQ;
4065 int ret;
4066 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4067 memset(&payload, 0, sizeof(payload));
4068 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4069 payload.tag = cpu_to_le32(1);
4070 payload.phyop_phyid =
4071 cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
4072 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4073 return ret;
4074}
4075
4076static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
4077{
4078 u32 value;
4079#ifdef PM8001_USE_MSIX
4080 return 1;
4081#endif
4082 value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
4083 if (value)
4084 return 1;
4085 return 0;
4086
4087}
4088
/**
 * pm80xx_chip_isr - PM80xx isr handler.
 * @pm8001_ha: our hba card information.
 * @vec: the interrupt vector whose outbound queue is to be drained.
 *
 * Masks the vector, drains its outbound queue via process_oq(), then
 * re-enables the vector.  Always reports the interrupt as handled.
 */
static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
	pm80xx_chip_interrupt_disable(pm8001_ha, vec);	/* mask this vector */
	process_oq(pm8001_ha, vec);	/* consume completions */
	pm80xx_chip_interrupt_enable(pm8001_ha, vec);	/* unmask again */
	return IRQ_HANDLED;
}
4103
/*
 * Dispatch table binding the generic pm8001 SAS layer to the 80xx
 * (SPCv/SPCve) chip routines; the pm8001_* entries are shared with the
 * older SPC implementation.
 */
const struct pm8001_dispatch pm8001_80xx_dispatch = {
	.name = "pmc80xx",
	.chip_init = pm80xx_chip_init,
	.chip_soft_rst = pm80xx_chip_soft_rst,
	.chip_rst = pm80xx_hw_chip_rst,
	.chip_iounmap = pm8001_chip_iounmap,
	.isr = pm80xx_chip_isr,
	.is_our_interupt = pm80xx_chip_is_our_interupt,
	.isr_process_oq = process_oq,
	.interrupt_enable = pm80xx_chip_interrupt_enable,
	.interrupt_disable = pm80xx_chip_interrupt_disable,
	.make_prd = pm8001_chip_make_sg,
	.smp_req = pm80xx_chip_smp_req,
	.ssp_io_req = pm80xx_chip_ssp_io_req,
	.sata_req = pm80xx_chip_sata_req,
	.phy_start_req = pm80xx_chip_phy_start_req,
	.phy_stop_req = pm80xx_chip_phy_stop_req,
	.reg_dev_req = pm80xx_chip_reg_dev_req,
	.dereg_dev_req = pm8001_chip_dereg_dev_req,
	.phy_ctl_req = pm80xx_chip_phy_ctl_req,
	.task_abort = pm8001_chip_abort_task,
	.ssp_tm_req = pm8001_chip_ssp_tm_req,
	.get_nvmd_req = pm8001_chip_get_nvmd_req,
	.set_nvmd_req = pm8001_chip_set_nvmd_req,
	.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
	.set_dev_state_req = pm8001_chip_set_dev_state_req,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644
index 000000000000..2b760ba75d7b
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -0,0 +1,1523 @@
1/*
2 * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41#ifndef _PMC8001_REG_H_
42#define _PMC8001_REG_H_
43
44#include <linux/types.h>
45#include <scsi/libsas.h>
46
/* Inbound (host -> controller) IOMB request opcodes */
#define OPC_INB_ECHO				1	/* 0x001 */
#define OPC_INB_PHYSTART			4	/* 0x004 */
#define OPC_INB_PHYSTOP				5	/* 0x005 */
#define OPC_INB_SSPINIIOSTART			6	/* 0x006 */
#define OPC_INB_SSPINITMSTART			7	/* 0x007 */
/* 0x8 RESV IN SPCv */
#define OPC_INB_RSVD				8	/* 0x008 */
#define OPC_INB_DEV_HANDLE_ACCEPT		9	/* 0x009 */
#define OPC_INB_SSPTGTIOSTART			10	/* 0x00A */
#define OPC_INB_SSPTGTRSPSTART			11	/* 0x00B */
/* 0xC, 0xD, 0xE removed in SPCv */
#define OPC_INB_SSP_ABORT			15	/* 0x00F */
#define OPC_INB_DEREG_DEV_HANDLE		16	/* 0x010 */
#define OPC_INB_GET_DEV_HANDLE			17	/* 0x011 */
#define OPC_INB_SMP_REQUEST			18	/* 0x012 */
/* 0x13 SMP_RESPONSE is removed in SPCv */
#define OPC_INB_SMP_ABORT			20	/* 0x014 */
/* 0x16 RESV IN SPCv */
#define OPC_INB_RSVD1				22	/* 0x016 */
#define OPC_INB_SATA_HOST_OPSTART		23	/* 0x017 */
#define OPC_INB_SATA_ABORT			24	/* 0x018 */
#define OPC_INB_LOCAL_PHY_CONTROL		25	/* 0x019 */
/* 0x1A RESV IN SPCv */
#define OPC_INB_RSVD2				26	/* 0x01A */
#define OPC_INB_FW_FLASH_UPDATE			32	/* 0x020 */
#define OPC_INB_GPIO				34	/* 0x022 */
#define OPC_INB_SAS_DIAG_MODE_START_END		35	/* 0x023 */
#define OPC_INB_SAS_DIAG_EXECUTE		36	/* 0x024 */
/* 0x25 RESV IN SPCv */
#define OPC_INB_RSVD3				37	/* 0x025 */
#define OPC_INB_GET_TIME_STAMP			38	/* 0x026 */
#define OPC_INB_PORT_CONTROL			39	/* 0x027 */
#define OPC_INB_GET_NVMD_DATA			40	/* 0x028 */
#define OPC_INB_SET_NVMD_DATA			41	/* 0x029 */
#define OPC_INB_SET_DEVICE_STATE		42	/* 0x02A */
#define OPC_INB_GET_DEVICE_STATE		43	/* 0x02B */
#define OPC_INB_SET_DEV_INFO			44	/* 0x02C */
/* 0x2D RESV IN SPCv */
#define OPC_INB_RSVD4				45	/* 0x02D */
#define OPC_INB_SGPIO_REGISTER			46	/* 0x02E */
#define OPC_INB_PCIE_DIAG_EXEC			47	/* 0x02F */
#define OPC_INB_SET_CONTROLLER_CONFIG		48	/* 0x030 */
#define OPC_INB_GET_CONTROLLER_CONFIG		49	/* 0x031 */
#define OPC_INB_REG_DEV				50	/* 0x032 */
#define OPC_INB_SAS_HW_EVENT_ACK		51	/* 0x033 */
#define OPC_INB_GET_DEVICE_INFO			52	/* 0x034 */
#define OPC_INB_GET_PHY_PROFILE			53	/* 0x035 */
#define OPC_INB_FLASH_OP_EXT			54	/* 0x036 */
#define OPC_INB_SET_PHY_PROFILE			55	/* 0x037 */
#define OPC_INB_KEK_MANAGEMENT			256	/* 0x100 */
#define OPC_INB_DEK_MANAGEMENT			257	/* 0x101 */
#define OPC_INB_SSP_INI_DIF_ENC_IO		258	/* 0x102 */
#define OPC_INB_SATA_DIF_ENC_IO			259	/* 0x103 */

/* Outbound (controller -> host) IOMB response opcodes */
#define OPC_OUB_ECHO				1	/* 0x001 */
#define OPC_OUB_RSVD				4	/* 0x004 */
#define OPC_OUB_SSP_COMP			5	/* 0x005 */
#define OPC_OUB_SMP_COMP			6	/* 0x006 */
#define OPC_OUB_LOCAL_PHY_CNTRL			7	/* 0x007 */
#define OPC_OUB_RSVD1				10	/* 0x00A */
#define OPC_OUB_DEREG_DEV			11	/* 0x00B */
#define OPC_OUB_GET_DEV_HANDLE			12	/* 0x00C */
#define OPC_OUB_SATA_COMP			13	/* 0x00D */
#define OPC_OUB_SATA_EVENT			14	/* 0x00E */
#define OPC_OUB_SSP_EVENT			15	/* 0x00F */
#define OPC_OUB_RSVD2				16	/* 0x010 */
/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
#define OPC_OUB_SSP_RECV_EVENT			18	/* 0x012 */
#define OPC_OUB_RSVD3				19	/* 0x013 */
#define OPC_OUB_FW_FLASH_UPDATE			20	/* 0x014 */
#define OPC_OUB_GPIO_RESPONSE			22	/* 0x016 */
#define OPC_OUB_GPIO_EVENT			23	/* 0x017 */
#define OPC_OUB_GENERAL_EVENT			24	/* 0x018 */
#define OPC_OUB_SSP_ABORT_RSP			26	/* 0x01A */
#define OPC_OUB_SATA_ABORT_RSP			27	/* 0x01B */
#define OPC_OUB_SAS_DIAG_MODE_START_END		28	/* 0x01C */
#define OPC_OUB_SAS_DIAG_EXECUTE		29	/* 0x01D */
#define OPC_OUB_GET_TIME_STAMP			30	/* 0x01E */
#define OPC_OUB_RSVD4				31	/* 0x01F */
#define OPC_OUB_PORT_CONTROL			32	/* 0x020 */
#define OPC_OUB_SKIP_ENTRY			33	/* 0x021 */
#define OPC_OUB_SMP_ABORT_RSP			34	/* 0x022 */
#define OPC_OUB_GET_NVMD_DATA			35	/* 0x023 */
#define OPC_OUB_SET_NVMD_DATA			36	/* 0x024 */
#define OPC_OUB_DEVICE_HANDLE_REMOVAL		37	/* 0x025 */
#define OPC_OUB_SET_DEVICE_STATE		38	/* 0x026 */
#define OPC_OUB_GET_DEVICE_STATE		39	/* 0x027 */
#define OPC_OUB_SET_DEV_INFO			40	/* 0x028 */
#define OPC_OUB_RSVD5				41	/* 0x029 */
#define OPC_OUB_HW_EVENT			1792	/* 0x700 */
#define OPC_OUB_DEV_HANDLE_ARRIV		1824	/* 0x720 */
#define OPC_OUB_THERM_HW_EVENT			1840	/* 0x730 */
#define OPC_OUB_SGPIO_RESP			2094	/* 0x82E */
#define OPC_OUB_PCIE_DIAG_EXECUTE		2095	/* 0x82F */
#define OPC_OUB_DEV_REGIST			2098	/* 0x832 */
#define OPC_OUB_SAS_HW_EVENT_ACK		2099	/* 0x833 */
#define OPC_OUB_GET_DEVICE_INFO			2100	/* 0x834 */
/* spcv specific commands */
#define OPC_OUB_PHY_START_RESP			2052	/* 0x804 */
#define OPC_OUB_PHY_STOP_RESP			2053	/* 0x805 */
#define OPC_OUB_SET_CONTROLLER_CONFIG		2096	/* 0x830 */
#define OPC_OUB_GET_CONTROLLER_CONFIG		2097	/* 0x831 */
#define OPC_OUB_GET_PHY_PROFILE			2101	/* 0x835 */
#define OPC_OUB_FLASH_OP_EXT			2102	/* 0x836 */
#define OPC_OUB_SET_PHY_PROFILE			2103	/* 0x837 */
#define OPC_OUB_KEK_MANAGEMENT_RESP		2304	/* 0x900 */
#define OPC_OUB_DEK_MANAGEMENT_RESP		2305	/* 0x901 */
#define OPC_OUB_SSP_COALESCED_COMP_RESP		2306	/* 0x902 */
/* PHY start: bit-fields packed into phy_start_req.ase_sh_lm_slr_phyid */
#define SSC_DISABLE_15			(0x01 << 16)
#define SSC_DISABLE_30			(0x02 << 16)
#define SSC_DISABLE_60			(0x04 << 16)
#define SAS_ASE				(0x01 << 15)
#define SPINHOLD_DISABLE		(0x00 << 14)
#define SPINHOLD_ENABLE			(0x01 << 14)
#define LINKMODE_SAS			(0x01 << 12)
#define LINKMODE_DSATA			(0x02 << 12)
#define LINKMODE_AUTO			(0x03 << 12)
#define LINKRATE_15			(0x01 << 8)
#define LINKRATE_30			(0x02 << 8)
#define LINKRATE_60			(0x06 << 8)

/* Thermal related */
#define THERMAL_ENABLE			0x1
#define THERMAL_LOG_ENABLE		0x1
#define THERMAL_OP_CODE			0x6
#define LTEMPHIL			70	/* local temperature high limit */
#define RTEMPHIL			100	/* remote temperature high limit */

/* Encryption info: encryption state reported in scratchpad register 3 */
#define SCRATCH_PAD3_ENC_DISABLED	0x00000000
#define SCRATCH_PAD3_ENC_DIS_ERR	0x00000001
#define SCRATCH_PAD3_ENC_ENA_ERR	0x00000002
#define SCRATCH_PAD3_ENC_READY		0x00000003
#define SCRATCH_PAD3_ENC_MASK		SCRATCH_PAD3_ENC_READY

#define SCRATCH_PAD3_XTS_ENABLED	(1 << 14)
#define SCRATCH_PAD3_SMA_ENABLED	(1 << 4)
#define SCRATCH_PAD3_SMB_ENABLED	(1 << 5)
#define SCRATCH_PAD3_SMF_ENABLED	0
#define SCRATCH_PAD3_SM_MASK		0x000000F0
#define SCRATCH_PAD3_ERR_CODE		0x00FF0000

#define SEC_MODE_SMF			0x0
#define SEC_MODE_SMA			0x100
#define SEC_MODE_SMB			0x200
#define CIPHER_MODE_ECB			0x00000001
#define CIPHER_MODE_XTS			0x00000002
#define KEK_MGMT_SUBOP_KEYCARDUPDATE	0x4

/* SAS protocol timer configuration page */
#define SAS_PROTOCOL_TIMER_CONFIG_PAGE	0x04
#define STP_MCT_TMO			32
#define SSP_MCT_TMO			32
#define SAS_MAX_OPEN_TIME		5
#define SMP_MAX_CONN_TIMER		0xFF
#define STP_FRM_TIMER			0
#define STP_IDLE_TIME			5	/* 5 us; controller default */
#define SAS_MFD				0
#define SAS_OPNRJT_RTRY_INTVL		2
#define SAS_DOPNRJT_RTRY_TMO		128
#define SAS_COPNRJT_RTRY_TMO		128

/*
 * Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
 * Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
 * is DOPNRJT_RTRY_TMO
 */
#define SAS_DOPNRJT_RTRY_THR		23438
#define SAS_COPNRJT_RTRY_THR		23438
#define SAS_MAX_AIP			0x200000
#define IT_NEXUS_TIMEOUT		0x7D0
#define PORT_RECOVERY_TIMEOUT		((IT_NEXUS_TIMEOUT/100) + 30)
/*
 * struct mpi_msg_hdr - generic header carried at the start of every IOMB.
 */
struct mpi_msg_hdr {
	__le32 header;	/* Bits [11:0] - Message operation code */
	/* Bits [15:12] - Message Category */
	/* Bits [21:16] - Outboundqueue ID for the
	operation completion message */
	/* Bits [23:22] - Reserved */
	/* Bits [28:24] - Buffer Count, indicates how
	many buffers are allocated for the message */
	/* Bits [30:29] - Reserved */
	/* Bits [31] - Message Valid bit */
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of PHY Start Command
 * use to describe enable the phy (128 bytes)
 */
struct phy_start_req {
	__le32 tag;
	__le32 ase_sh_lm_slr_phyid;
	struct sas_identify_frame sas_identify; /* 28 Bytes */
	__le32 spasti;
	u32 reserved[21];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of PHY Stop Command
 * use to disable the phy (128 bytes)
 */
struct phy_stop_req {
	__le32 tag;
	__le32 phy_id;
	u32 reserved[29];
} __attribute__((packed, aligned(4)));
257
/* set device bits fis - device to host */
struct set_dev_bits_fis {
	u8 fis_type;	/* 0xA1*/
	u8 n_i_pmport;
	/* b7 : n Bit. Notification bit. If set device needs attention. */
	/* b6 : i Bit. Interrupt Bit */
	/* b5-b4: reserved2 */
	/* b3-b0: PM Port */
	u8 status;
	u8 error;
	u32 _r_a;
} __attribute__ ((packed));

/* PIO setup FIS - device to host */
struct pio_setup_fis {
	u8 fis_type;	/* 0x5f */
	u8 i_d_pmPort;
	/* b7 : reserved */
	/* b6 : i bit. Interrupt bit */
	/* b5 : d bit. data transfer direction. set to 1 for device to host
	xfer */
	/* b4 : reserved */
	/* b3-b0: PM Port */
	u8 status;
	u8 error;
	u8 lbal;
	u8 lbam;
	u8 lbah;
	u8 device;
	u8 lbal_exp;
	u8 lbam_exp;
	u8 lbah_exp;
	u8 _r_a;
	u8 sector_count;
	u8 sector_count_exp;
	u8 _r_b;
	u8 e_status;
	u8 _r_c[2];
	u8 transfer_count;
} __attribute__ ((packed));

/*
 * brief the data structure of SATA Completion Response
 * use to describe the sata task response (64 bytes)
 */
struct sata_completion_resp {
	__le32 tag;
	__le32 status;
	__le32 param;
	u32 sata_resp[12];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SAS HW Event Notification
 * use to alert the host about the hardware event(64 bytes)
 */
/* updated outbound struct for spcv */

struct hw_event_resp {
	__le32 lr_status_evt_portid;
	__le32 evt_param;
	__le32 phyid_npip_portstate;
	struct sas_identify_frame	sas_identify;
	struct dev_to_host_fis	sata_fis;
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure for thermal event notification
 */

struct thermal_hw_event {
	__le32 thermal_event;
	__le32 rht_lht;
} __attribute__((packed, aligned(4)));
331
/*
 * brief the data structure of REGISTER DEVICE Command
 * use to describe MPI REGISTER DEVICE Command (64 bytes)
 */

struct reg_dev_req {
	__le32 tag;
	__le32 phyid_portid;
	__le32 dtype_dlr_mcn_ir_retry;
	__le32 firstburstsize_ITNexustimeout;
	u8 sas_addr[SAS_ADDR_SIZE];
	__le32 upper_device_id;
	u32 reserved[24];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of DEREGISTER DEVICE Command
 * use to request spc to remove all internal resources associated
 * with the device id (64 bytes)
 */

struct dereg_dev_req {
	__le32 tag;
	__le32 device_id;
	u32 reserved[29];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of DEVICE_REGISTRATION Response
 * use to notify the completion of the device registration (64 bytes)
 */
struct dev_reg_resp {
	__le32 tag;
	__le32 status;
	__le32 device_id;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of Local PHY Control Command
 * use to issue PHY CONTROL to local phy (64 bytes)
 */
struct local_phy_ctl_req {
	__le32 tag;
	__le32 phyop_phyid;
	u32 reserved1[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of Local Phy Control Response
 * use to describe MPI Local Phy Control Response (64 bytes)
 */
struct local_phy_ctl_resp {
	__le32 tag;
	__le32 phyop_phyid;
	__le32 status;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));

/* masks for local_phy_ctl phyop_phyid field */
#define OP_BITS 0x0000FF00
#define ID_BITS 0x000000FF

/*
 * brief the data structure of PORT Control Command
 * use to control port properties (64 bytes)
 */

struct port_ctl_req {
	__le32 tag;
	__le32 portop_portid;
	__le32 param0;
	__le32 param1;
	u32 reserved1[27];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of HW Event Ack Command
 * use to acknowledge receive HW event (64 bytes)
 */
struct hw_event_ack_req {
	__le32 tag;
	__le32 phyid_sea_portid;
	__le32 param0;
	__le32 param1;
	u32 reserved1[27];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of PHY_START Response Command
 * indicates the completion of PHY_START command (64 bytes)
 */
struct phy_start_resp {
	__le32 tag;
	__le32 status;
	__le32 phyid;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of PHY_STOP Response Command
 * indicates the completion of PHY_STOP command (64 bytes)
 */
struct phy_stop_resp {
	__le32 tag;
	__le32 status;
	__le32 phyid;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));
440
/*
 * brief the data structure of SSP Completion Response
 * use to indicate a SSP Completion (n bytes)
 */
struct ssp_completion_resp {
	__le32 tag;
	__le32 status;
	__le32 param;
	__le32 ssptag_rescv_rescpad;
	struct ssp_response_iu ssp_resp_iu;
	__le32 residual_count;
} __attribute__((packed, aligned(4)));

/* residual count valid bit in ssptag_rescv_rescpad */
#define SSP_RESCV_BIT	0x00010000

/*
 * brief the data structure of SATA EVENT response
 * use to indicate a SATA Completion (64 bytes)
 */
struct sata_event_resp {
	__le32 tag;
	__le32 event;
	__le32 port_id;
	__le32 device_id;
	u32 reserved;
	__le32 event_param0;
	__le32 event_param1;
	__le32 sata_addr_h32;
	__le32 sata_addr_l32;
	__le32 e_udt1_udt0_crc;
	__le32 e_udt5_udt4_udt3_udt2;
	__le32 a_udt1_udt0_crc;
	__le32 a_udt5_udt4_udt3_udt2;
	__le32 hwdevid_diferr;
	__le32 err_framelen_byteoffset;
	__le32 err_dataframe;
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SSP EVENT response
 * use to indicate a SSP Completion (64 bytes)
 */
struct ssp_event_resp {
	__le32 tag;
	__le32 event;
	__le32 port_id;
	__le32 device_id;
	__le32 ssp_tag;
	__le32 event_param0;
	__le32 event_param1;
	__le32 sas_addr_h32;
	__le32 sas_addr_l32;
	__le32 e_udt1_udt0_crc;
	__le32 e_udt5_udt4_udt3_udt2;
	__le32 a_udt1_udt0_crc;
	__le32 a_udt5_udt4_udt3_udt2;
	__le32 hwdevid_diferr;
	__le32 err_framelen_byteoffset;
	__le32 err_dataframe;
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of General Event Notification Response
 * use to describe MPI General Event Notification Response (64 bytes)
 */
struct general_event_resp {
	__le32 status;
	__le32 inb_IOMB_payload[14];
} __attribute__((packed, aligned(4)));

/* number of __le32 words echoed back in inb_IOMB_payload */
#define GENERAL_EVENT_PAYLOAD	14
#define OPCODE_BITS	0x00000fff
513
/*
 * brief the data structure of SMP Request Command
 * use to describe MPI SMP REQUEST Command (64 bytes)
 */
struct smp_req {
	__le32 tag;
	__le32 device_id;
	__le32 len_ip_ir;
	/* Bits [0] - Indirect response */
	/* Bits [1] - Indirect Payload */
	/* Bits [15:2] - Reserved */
	/* Bits [23:16] - direct payload Len */
	/* Bits [31:24] - Reserved */
	u8 smp_req16[16];
	union {
		u8 smp_req[32];
		struct {
			__le64 long_req_addr;/* sg dma address, LE */
			__le32 long_req_size;/* LE */
			u32 _r_a;
			__le64 long_resp_addr;/* sg dma address, LE */
			__le32 long_resp_size;/* LE */
			u32 _r_b;
		} long_smp_req;/* sequencer extension */
	};
	__le32 rsvd[16];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SMP Completion Response
 * use to describe MPI SMP Completion Response (64 bytes)
 */
struct smp_completion_resp {
	__le32 tag;
	__le32 status;
	__le32 param;
	u8 _r_a[252];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SSP SMP SATA Abort Command
 * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
 */
struct task_abort_req {
	__le32 tag;
	__le32 device_id;
	__le32 tag_to_abort;
	__le32 abort_all;
	u32 reserved[27];
} __attribute__((packed, aligned(4)));

/* These flags used for SSP SMP & SATA Abort */
#define ABORT_MASK	0x3
#define ABORT_SINGLE	0x0
#define ABORT_ALL	0x1

/**
 * brief the data structure of SSP SATA SMP Abort Response
 * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
 */
struct task_abort_resp {
	__le32 tag;
	__le32 status;
	__le32 scp;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));
579
/**
 * brief the data structure of SAS Diagnostic Start/End Command
 * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
 */
struct sas_diag_start_end_req {
	__le32 tag;
	__le32 operation_phyid;
	u32 reserved[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of SAS Diagnostic Execute Command
 * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
 */
struct sas_diag_execute_req {
	__le32 tag;
	__le32 cmdtype_cmddesc_phyid;
	__le32 pat1_pat2;
	__le32 threshold;
	__le32 codepat_errmsk;
	__le32 pmon;
	__le32 pERF1CTL;
	u32 reserved[24];
} __attribute__((packed, aligned(4)));

#define SAS_DIAG_PARAM_BYTES 24

/*
 * brief the data structure of Set Device State Command
 * use to describe MPI Set Device State Command (64 bytes)
 */
struct set_dev_state_req {
	__le32 tag;
	__le32 device_id;
	__le32 nds;
	u32 reserved[28];
} __attribute__((packed, aligned(4)));

/*
 * brief the data structure of SATA Start Command
 * use to describe MPI SATA IO Start Command (64 bytes)
 * Note: This structure is common for normal / encryption I/O
 */

struct sata_start_req {
	__le32 tag;
	__le32 device_id;
	__le32 data_len;
	__le32 ncqtag_atap_dir_m_dad;
	struct host_to_dev_fis sata_fis;
	u32 reserved1;
	u32 reserved2;	/* dword 11. rsvd for normal I/O. */
			/* EPLE Descl for enc I/O */
	u32 addr_low;	/* dword 12. rsvd for enc I/O */
	u32 addr_high;	/* dword 13. reserved for enc I/O */
	__le32 len;	/* dword 14: length for normal I/O. */
			/* EPLE Desch for enc I/O */
	__le32 esgl;	/* dword 15. rsvd for enc I/O */
	__le32 atapi_scsi_cdb[4];	/* dword 16-19. rsvd for enc I/O */
	/* The below fields are reserved for normal I/O */
	__le32 key_index_mode;	/* dword 20 */
	__le32 sector_cnt_enss;/* dword 21 */
	__le32 keytagl;		/* dword 22 */
	__le32 keytagh;		/* dword 23 */
	__le32 twk_val0;	/* dword 24 */
	__le32 twk_val1;	/* dword 25 */
	__le32 twk_val2;	/* dword 26 */
	__le32 twk_val3;	/* dword 27 */
	__le32 enc_addr_low;	/* dword 28. Encryption SGL address high */
	__le32 enc_addr_high;	/* dword 29. Encryption SGL address low */
	__le32 enc_len;		/* dword 30. Encryption length */
	__le32 enc_esgl;	/* dword 31. Encryption esgl bit */
} __attribute__((packed, aligned(4)));
653
/**
 * brief the data structure of SSP INI TM Start Command
 * use to describe MPI SSP INI TM Start Command (64 bytes)
 */
struct ssp_ini_tm_start_req {
	__le32 tag;
	__le32 device_id;
	__le32 relate_tag;
	__le32 tmf;
	u8 lun[8];
	__le32 ds_ads_m;
	u32 reserved[24];
} __attribute__((packed, aligned(4)));

/* SSP information unit embedded in ssp_ini_io_start_req */
struct ssp_info_unit {
	u8 lun[8];/* SCSI Logical Unit Number */
	u8 reserved1;/* reserved */
	u8 efb_prio_attr;
	/* B7 : enabledFirstBurst */
	/* B6-3 : taskPriority */
	/* B2-0 : taskAttribute */
	u8 reserved2;	/* reserved */
	u8 additional_cdb_len;
	/* B7-2 : additional_cdb_len */
	/* B1-0 : reserved */
	u8 cdb[16];/* The SCSI CDB up to 16 bytes length */
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of SSP INI IO Start Command
 * use to describe MPI SSP INI IO Start Command (64 bytes)
 * Note: This structure is common for normal / encryption I/O
 */
struct ssp_ini_io_start_req {
	__le32 tag;
	__le32 device_id;
	__le32 data_len;
	__le32 dad_dir_m_tlr;
	struct ssp_info_unit ssp_iu;
	__le32 addr_low;	/* dword 12: sgl low for normal I/O. */
				/* epl_descl for encryption I/O */
	__le32 addr_high;	/* dword 13: sgl hi for normal I/O */
				/* dpl_descl for encryption I/O */
	__le32 len;		/* dword 14: len for normal I/O. */
				/* edpl_desch for encryption I/O */
	__le32 esgl;		/* dword 15: ESGL bit for normal I/O. */
				/* user defined tag mask for enc I/O */
	/* The below fields are reserved for normal I/O */
	u8 udt[12];		/* dword 16-18 */
	__le32 sectcnt_ios;	/* dword 19 */
	__le32 key_cmode;	/* dword 20 */
	__le32 ks_enss;		/* dword 21 */
	__le32 keytagl;		/* dword 22 */
	__le32 keytagh;		/* dword 23 */
	__le32 twk_val0;	/* dword 24 */
	__le32 twk_val1;	/* dword 25 */
	__le32 twk_val2;	/* dword 26 */
	__le32 twk_val3;	/* dword 27 */
	__le32 enc_addr_low;	/* dword 28: Encryption sgl addr low */
	__le32 enc_addr_high;	/* dword 29: Encryption sgl addr hi */
	__le32 enc_len;		/* dword 30: Encryption length */
	__le32 enc_esgl;	/* dword 31: ESGL bit for encryption */
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
 * use to initiate SSP I/O operation with optional DIF/ENC
 */
struct ssp_dif_enc_io_req {
	__le32 tag;
	__le32 device_id;
	__le32 data_len;
	__le32 dirMTlr;
	__le32 sspiu0;
	__le32 sspiu1;
	__le32 sspiu2;
	__le32 sspiu3;
	__le32 sspiu4;
	__le32 sspiu5;
	__le32 sspiu6;
	__le32 epl_des;
	__le32 dpl_desl_ndplr;
	__le32 dpl_desh;
	__le32 uum_uuv_bss_difbits;
	u8 udt[12];
	__le32 sectcnt_ios;
	__le32 key_cmode;
	__le32 ks_enss;
	__le32 keytagl;
	__le32 keytagh;
	__le32 twk_val0;
	__le32 twk_val1;
	__le32 twk_val2;
	__le32 twk_val3;
	__le32 addr_low;
	__le32 addr_high;
	__le32 len;
	__le32 esgl;
} __attribute__((packed, aligned(4)));
753
/**
 * brief the data structure of Firmware download
 * use to describe MPI FW DOWNLOAD Command (64 bytes)
 */
struct fw_flash_Update_req {
	__le32 tag;
	__le32 cur_image_offset;
	__le32 cur_image_len;
	__le32 total_image_len;
	u32 reserved0[7];
	__le32 sgl_addr_lo;
	__le32 sgl_addr_hi;
	__le32 len;
	__le32 ext_reserved;
	u32 reserved1[16];
} __attribute__((packed, aligned(4)));

#define FWFLASH_IOMB_RESERVED_LEN 0x07
/**
 * brief the data structure of FW_FLASH_UPDATE Response
 * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
 *
 */
struct fw_flash_Update_resp {
	__le32 tag;
	__le32 status;
	u32 reserved[13];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of Get NVM Data Command
 * use to get data from NVM in HBA(64 bytes)
 */
struct get_nvm_data_req {
	__le32 tag;
	__le32 len_ir_vpdd;
	__le32 vpd_offset;
	u32 reserved[8];
	__le32 resp_addr_lo;
	__le32 resp_addr_hi;
	__le32 resp_len;
	u32 reserved1[17];
} __attribute__((packed, aligned(4)));

/* Set NVM Data Command: same layout as the Get variant */
struct set_nvm_data_req {
	__le32 tag;
	__le32 len_ir_vpdd;
	__le32 vpd_offset;
	u32 reserved[8];
	__le32 resp_addr_lo;
	__le32 resp_addr_hi;
	__le32 resp_len;
	u32 reserved1[17];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for SET CONTROLLER CONFIG COMMAND
 * use to modify controller configuration
 */
struct set_ctrl_cfg_req {
	__le32	tag;
	__le32	cfg_pg[14];
	u32	reserved[16];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for GET CONTROLLER CONFIG COMMAND
 * use to get controller configuration page
 */
struct get_ctrl_cfg_req {
	__le32	tag;
	__le32	pgcd;
	__le32	int_vec;
	u32	reserved[28];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for KEK_MANAGEMENT COMMAND
 * use for KEK management
 */
struct kek_mgmt_req {
	__le32	tag;
	__le32	new_curidx_ksop;
	u32	reserved;
	__le32	kblob[12];
	u32	reserved1[16];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for DEK_MANAGEMENT COMMAND
 * use for DEK management
 */
struct dek_mgmt_req {
	__le32	tag;
	__le32	kidx_dsop;
	__le32	dekidx;
	__le32	addr_l;
	__le32	addr_h;
	__le32	nent;
	__le32	dbf_tblsize;
	u32	reserved[24];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for SET PHY PROFILE COMMAND
 * use to retrieve phy specific information
 */
struct set_phy_profile_req {
	__le32	tag;
	__le32	ppc_phyid;
	u32	reserved[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for GET PHY PROFILE COMMAND
 * use to retrieve phy specific information
 */
struct get_phy_profile_req {
	__le32	tag;
	__le32	ppc_phyid;
	__le32	profile[29];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure for EXT FLASH PARTITION
 * use to manage ext flash partition
 */
struct ext_flash_partition_req {
	__le32	tag;
	__le32	cmd;
	__le32	offset;
	__le32	len;
	u32	reserved[7];
	__le32	addr_low;
	__le32	addr_high;
	__le32	len1;
	__le32	ext;
	u32	reserved1[16];
} __attribute__((packed, aligned(4)));

/* NVM device types for get/set NVMD data */
#define TWI_DEVICE	0x0
#define C_SEEPROM	0x1
#define VPD_FLASH	0x4
#define AAP1_RDUMP	0x5
#define IOP_RDUMP	0x6
#define EXPAN_ROM	0x7

#define IPMode		0x80000000
#define NVMD_TYPE	0x0000000F
#define NVMD_STAT	0x0000FFFF
#define NVMD_LEN	0xFF000000
/**
 * brief the data structure of Get NVMD Data Response
 * use to describe MPI Get NVMD Data Response (64 bytes)
 */
struct get_nvm_data_resp {
	__le32 tag;
	__le32 ir_tda_bn_dps_das_nvm;
	__le32 dlen_status;
	__le32 nvm_data[12];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of SAS Diagnostic Start/End Response
 * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
 *
 */
struct sas_diag_start_end_resp {
	__le32 tag;
	__le32 status;
	u32 reserved[13];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of SAS Diagnostic Execute Response
 * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
 *
 */
struct sas_diag_execute_resp {
	__le32 tag;
	__le32 cmdtype_cmddesc_phyid;
	__le32 Status;
	__le32 ReportData;
	u32 reserved[11];
} __attribute__((packed, aligned(4)));

/**
 * brief the data structure of Set Device State Response
 * use to describe MPI Set Device State Response (64 bytes)
 *
 */
struct set_dev_state_resp {
	__le32 tag;
	__le32 status;
	__le32 device_id;
	__le32 pds_nds;
	u32 reserved[11];
} __attribute__((packed, aligned(4)));

/* new outbound structure for spcv - begins */
/**
 * brief the data structure for SET CONTROLLER CONFIG COMMAND
 * use to modify controller configuration
 */
struct set_ctrl_cfg_resp {
	__le32 tag;
	__le32 status;
	__le32 err_qlfr_pgcd;
	u32 reserved[12];
} __attribute__((packed, aligned(4)));

/* GET CONTROLLER CONFIG response */
struct get_ctrl_cfg_resp {
	__le32 tag;
	__le32 status;
	__le32 err_qlfr;
	__le32 confg_page[12];
} __attribute__((packed, aligned(4)));

/* KEK management response */
struct kek_mgmt_resp {
	__le32 tag;
	__le32 status;
	__le32 kidx_new_curr_ksop;
	__le32 err_qlfr;
	u32 reserved[11];
} __attribute__((packed, aligned(4)));

/* DEK management response */
struct dek_mgmt_resp {
	__le32 tag;
	__le32 status;
	__le32 kekidx_tbls_dsop;
	__le32 dekidx;
	__le32 err_qlfr;
	u32 reserved[10];
} __attribute__((packed, aligned(4)));

/* GET PHY PROFILE response */
struct get_phy_profile_resp {
	__le32 tag;
	__le32 status;
	__le32 ppc_phyid;
	__le32 ppc_specific_rsp[12];
} __attribute__((packed, aligned(4)));

/* extended flash operation response */
struct flash_op_ext_resp {
	__le32 tag;
	__le32 cmd;
	__le32 status;
	__le32 epart_size;
	__le32 epart_sect_size;
	u32 reserved[10];
} __attribute__((packed, aligned(4)));

/* SET PHY PROFILE response */
struct set_phy_profile_resp {
	__le32 tag;
	__le32 status;
	__le32 ppc_phyid;
	__le32 ppc_specific_rsp[12];
} __attribute__((packed, aligned(4)));

/* coalesced SSP completion: up to several (tag, ssp_tag) pairs per IOMB */
struct ssp_coalesced_comp_resp {
	__le32 coal_cnt;
	__le32 tag0;
	__le32 ssp_tag0;
	__le32 tag1;
	__le32 ssp_tag1;
	__le32 add_tag_ssp_tag[10];
} __attribute__((packed, aligned(4)));

/* new outbound structure for spcv - ends */
1022
/* brief data structure for SAS protocol timer configuration page.
 *
 */
struct SASProtocolTimerConfig {
	__le32 pageCode;			/* 0 */
	__le32 MST_MSI;				/* 1 */
	__le32 STP_SSP_MCT_TMO;			/* 2 */
	__le32 STP_FRM_TMO;			/* 3 */
	__le32 STP_IDLE_TMO;			/* 4 */
	__le32 OPNRJT_RTRY_INTVL;		/* 5 */
	__le32 Data_Cmd_OPNRJT_RTRY_TMO;	/* 6 */
	__le32 Data_Cmd_OPNRJT_RTRY_THR;	/* 7 */
	__le32 MAX_AIP;				/* 8 */
} __attribute__((packed, aligned(4)));

typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;

/* masks for set_dev_state_resp.pds_nds (previous/new device state) */
#define NDS_BITS 0x0F
#define PDS_BITS 0xF0

/*
 * HW Events type
 */

#define HW_EVENT_RESET_START			0x01
#define HW_EVENT_CHIP_RESET_COMPLETE		0x02
#define HW_EVENT_PHY_STOP_STATUS		0x03
#define HW_EVENT_SAS_PHY_UP			0x04
#define HW_EVENT_SATA_PHY_UP			0x05
#define HW_EVENT_SATA_SPINUP_HOLD		0x06
#define HW_EVENT_PHY_DOWN			0x07
#define HW_EVENT_PORT_INVALID			0x08
#define HW_EVENT_BROADCAST_CHANGE		0x09
#define HW_EVENT_PHY_ERROR			0x0A
#define HW_EVENT_BROADCAST_SES			0x0B
#define HW_EVENT_INBOUND_CRC_ERROR		0x0C
#define HW_EVENT_HARD_RESET_RECEIVED		0x0D
#define HW_EVENT_MALFUNCTION			0x0E
#define HW_EVENT_ID_FRAME_TIMEOUT		0x0F
#define HW_EVENT_BROADCAST_EXP			0x10
#define HW_EVENT_PHY_START_STATUS		0x11
#define HW_EVENT_LINK_ERR_INVALID_DWORD		0x12
#define HW_EVENT_LINK_ERR_DISPARITY_ERROR	0x13
#define HW_EVENT_LINK_ERR_CODE_VIOLATION	0x14
#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH	0x15
#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED	0x16
#define HW_EVENT_PORT_RECOVERY_TIMER_TMO	0x17
#define HW_EVENT_PORT_RECOVER			0x18
#define HW_EVENT_PORT_RESET_TIMER_TMO		0x19
#define HW_EVENT_PORT_RESET_COMPLETE		0x20
#define EVENT_BROADCAST_ASYNCH_EVENT		0x21

/* port state */
#define PORT_NOT_ESTABLISHED			0x00
#define PORT_VALID				0x01
#define PORT_LOSTCOMM				0x02
#define PORT_IN_RESET				0x04
#define PORT_3RD_PARTY_RESET			0x07
#define PORT_INVALID				0x08

/*
 * SSP/SMP/SATA IO Completion Status values
 */

#define IO_SUCCESS				0x00
#define IO_ABORTED				0x01
#define IO_OVERFLOW				0x02
#define IO_UNDERFLOW				0x03
#define IO_FAILED				0x04
#define IO_ABORT_RESET				0x05
#define IO_NOT_VALID				0x06
#define IO_NO_DEVICE				0x07
#define IO_ILLEGAL_PARAMETER			0x08
#define IO_LINK_FAILURE				0x09
#define IO_PROG_ERROR				0x0A

#define IO_EDC_IN_ERROR				0x0B
#define IO_EDC_OUT_ERROR			0x0C
#define IO_ERROR_HW_TIMEOUT			0x0D
#define IO_XFER_ERROR_BREAK			0x0E
#define IO_XFER_ERROR_PHY_NOT_READY		0x0F
#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED	0x10
#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION	0x11
#define IO_OPEN_CNX_ERROR_BREAK			0x12
#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS		0x13
#define IO_OPEN_CNX_ERROR_BAD_DESTINATION	0x14
#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED	0x15
#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY	0x16
1111#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17
1112/* This error code 0x18 is not used on SPCv */
1113#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18
1114#define IO_XFER_ERROR_NAK_RECEIVED 0x19
1115#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A
1116#define IO_XFER_ERROR_PEER_ABORTED 0x1B
1117#define IO_XFER_ERROR_RX_FRAME 0x1C
1118#define IO_XFER_ERROR_DMA 0x1D
1119#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E
1120#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F
1121#define IO_XFER_ERROR_SATA 0x20
1122
1123/* This error code 0x22 is not used on SPCv */
1124#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22
1125#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21
1126#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23
1127#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24
1128/* This error code 0x25 is not used on SPCv */
1129#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25
1130#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26
1131#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27
1132#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28
1133#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30
1134
/* The following error codes 0x31 and 0x32 are not used (obsolete) */
1136#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31
1137#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32
1138
1139#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34
1140#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35
1141#define IO_XFER_CMD_FRAME_ISSUED 0x36
1142#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37
1143#define IO_PORT_IN_RESET 0x38
1144#define IO_DS_NON_OPERATIONAL 0x39
1145#define IO_DS_IN_RECOVERY 0x3A
1146#define IO_TM_TAG_NOT_FOUND 0x3B
1147#define IO_XFER_PIO_SETUP_ERROR 0x3C
1148#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D
1149#define IO_DS_IN_ERROR 0x3E
1150#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F
1151#define IO_ABORT_IN_PROGRESS 0x40
1152#define IO_ABORT_DELAYED 0x41
1153#define IO_INVALID_LENGTH 0x42
1154
1155/********** additional response event values *****************/
1156
1157#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43
1158#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44
1159#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45
1160#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46
1161#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47
1162#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48
1163#define IO_DS_INVALID 0x49
1164/* WARNING: the value is not contiguous from here */
1165#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52
1166#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53
1167#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54
1168#define MPI_IO_RQE_BUSY_FULL 0x55
1169#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56
1170#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57
1171#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58
1172
1173#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004
1174#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024
1175
1176#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040
1177/*
1178 * An encryption IO request failed due to DEK Key Tag mismatch.
1179 * The key tag supplied in the encryption IOMB does not match with
1180 * the Key Tag in the referenced DEK Entry.
1181 */
1182#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041
1183#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042
1184/*
1185 * An encryption I/O request failed because the initial value (IV)
1186 * in the unwrapped DEK blob didn't match the IV used to unwrap it.
1187 */
1188#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043
1189/* An encryption I/O request failed due to an internal RAM ECC or
1190 * interface error while unwrapping the DEK. */
1191#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044
1192/* An encryption I/O request failed due to an internal RAM ECC or
1193 * interface error while unwrapping the DEK. */
1194#define IO_XFR_ERROR_INTERNAL_RAM 0x2045
/*
 * An encryption I/O request failed because the DEK index specified in
 * the I/O was outside the bounds of the total number of entries in the
 * host DEK table.
 *
 * Note: a space was missing between the macro name and its value, which
 * defined an empty macro named IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS0x2046
 * and left the intended identifier undefined.
 */
#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS		0x2046
1201
1202/* define DIF IO response error status code */
1203#define IO_XFR_ERROR_DIF_MISMATCH 0x3000
1204#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001
1205#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002
1206#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003
1207
1208/* define operator management response status and error qualifier code */
1209#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060
1210#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061
1211#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062
1212#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063
1213#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064
1214#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022
1215#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023
1216/***************** additional response event values ***************/
1217
1218/* WARNING: This error code must always be the last number.
1219 * If you add error code, modify this code also
1220 * It is used as an index
1221 */
1222#define IO_ERROR_UNKNOWN_GENERIC 0x2023
1223
1224/* MSGU CONFIGURATION TABLE*/
1225
1226#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01
1227#define SPCv_MSGU_CFG_TABLE_RESET 0x02
1228#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04
1229#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08
1230#define MSGU_IBDB_SET 0x00
1231#define MSGU_HOST_INT_STATUS 0x08
1232#define MSGU_HOST_INT_MASK 0x0C
1233#define MSGU_IOPIB_INT_STATUS 0x18
1234#define MSGU_IOPIB_INT_MASK 0x1C
1235#define MSGU_IBDB_CLEAR 0x20
1236
1237#define MSGU_MSGU_CONTROL 0x24
1238#define MSGU_ODR 0x20
1239#define MSGU_ODCR 0x28
1240
1241#define MSGU_ODMR 0x30
1242#define MSGU_ODMR_U 0x34
1243#define MSGU_ODMR_CLR 0x38
1244#define MSGU_ODMR_CLR_U 0x3C
1245#define MSGU_OD_RSVD 0x40
1246
1247#define MSGU_SCRATCH_PAD_0 0x44
1248#define MSGU_SCRATCH_PAD_1 0x48
1249#define MSGU_SCRATCH_PAD_2 0x4C
1250#define MSGU_SCRATCH_PAD_3 0x50
1251#define MSGU_HOST_SCRATCH_PAD_0 0x54
1252#define MSGU_HOST_SCRATCH_PAD_1 0x58
1253#define MSGU_HOST_SCRATCH_PAD_2 0x5C
1254#define MSGU_HOST_SCRATCH_PAD_3 0x60
1255#define MSGU_HOST_SCRATCH_PAD_4 0x64
1256#define MSGU_HOST_SCRATCH_PAD_5 0x68
1257#define MSGU_HOST_SCRATCH_PAD_6 0x6C
1258#define MSGU_HOST_SCRATCH_PAD_7 0x70
1259
1260/* bit definition for ODMR register */
1261#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all
1262 interrupt vector */
1263#define ODMR_CLEAR_ALL 0 /* clear all
1264 interrupt vector */
1265/* bit definition for ODCR register */
1266#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all
1267 interrupt vector*/
/* MSIX Interrupts */
1269#define MSIX_TABLE_OFFSET 0x2000
1270#define MSIX_TABLE_ELEMENT_SIZE 0x10
1271#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC
1272#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \
1273 MSIX_INTERRUPT_CONTROL_OFFSET)
1274#define MSIX_INTERRUPT_DISABLE 0x1
1275#define MSIX_INTERRUPT_ENABLE 0x0
1276
1277/* state definition for Scratch Pad1 register */
1278#define SCRATCH_PAD_RAAE_READY 0x3
1279#define SCRATCH_PAD_ILA_READY 0xC
1280#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
1281#define SCRATCH_PAD_IOP0_READY 0xC00
1282#define SCRATCH_PAD_IOP1_READY 0x3000
1283
1284/* boot loader state */
1285#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */
1286#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */
1287#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */
1288#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */
1289#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */
1290#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */
1291#define SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */
1292#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */
1293#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */
1294
 /* state definition for Scratch Pad2 register */
#define SCRATCH_PAD2_POR		0x00 /* power on state */
#define SCRATCH_PAD2_SFR		0x01 /* soft reset state */
#define SCRATCH_PAD2_ERR		0x02 /* error state */
#define SCRATCH_PAD2_RDY		0x03 /* ready state */
#define SCRATCH_PAD2_FWRDY_RST		0x04 /* FW ready for soft reset flag */
#define SCRATCH_PAD2_IOPRDY_RST		0x08 /* IOP ready for soft reset */
#define SCRATCH_PAD2_STATE_MASK		0xFFFFFFF4 /* Scratch Pad2 mask,
					bits 1-0 hold the state.
					NOTE(review): 0xFFFFFFF4 also clears
					bit 3 — confirm against HW spec */
#define SCRATCH_PAD2_RESERVED		0x000003FC/* Scratch Pad2
					reserved, bits 2 to 9 */

#define SCRATCH_PAD_ERROR_MASK		0xFFFFFC00 /* Error mask bits */
#define SCRATCH_PAD_STATE_MASK		0x00000003 /* State Mask bits */
1309
1310/* main configuration offset - byte offset */
1311#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */
1312#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */
1313#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */
1314#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */
1315#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */
1316#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */
1317#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */
1318#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */
1319#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */
1320#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */
1321
1322/* 0x28 - 0x4C - RSVD */
1323#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */
1324#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */
1325#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */
1326#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */
1327#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */
1328#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */
1329#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */
1330#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */
1331#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */
1332#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */
1333#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */
1334#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */
1335#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */
1336#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */
1337#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */
1338#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */
1339
1340#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */
1341#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
1342#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */
1343#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */
1344
/* General Status Table offset - byte offset */
1346#define GST_GSTLEN_MPIS_OFFSET 0x00
1347#define GST_IQ_FREEZE_STATE0_OFFSET 0x04
1348#define GST_IQ_FREEZE_STATE1_OFFSET 0x08
1349#define GST_MSGUTCNT_OFFSET 0x0C
1350#define GST_IOPTCNT_OFFSET 0x10
1351/* 0x14 - 0x34 - RSVD */
1352#define GST_GPIO_INPUT_VAL 0x38
1353/* 0x3c - 0x40 - RSVD */
1354#define GST_RERRINFO_OFFSET0 0x44
1355#define GST_RERRINFO_OFFSET1 0x48
1356#define GST_RERRINFO_OFFSET2 0x4c
1357#define GST_RERRINFO_OFFSET3 0x50
1358#define GST_RERRINFO_OFFSET4 0x54
1359#define GST_RERRINFO_OFFSET5 0x58
1360#define GST_RERRINFO_OFFSET6 0x5c
1361#define GST_RERRINFO_OFFSET7 0x60
1362
1363/* General Status Table - MPI state */
1364#define GST_MPI_STATE_UNINIT 0x00
1365#define GST_MPI_STATE_INIT 0x01
1366#define GST_MPI_STATE_TERMINATION 0x02
1367#define GST_MPI_STATE_ERROR 0x03
1368#define GST_MPI_STATE_MASK 0x07
1369
1370/* Per SAS PHY Attributes */
1371
1372#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */
1373#define PSPA_OB_HW_EVENT_PID0_OFFSET 0x04 /* DWORD V+1 */
1374#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */
1375#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */
1376#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */
1377#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */
1378#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */
1379#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */
1380#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */
1381#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */
1382#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */
1383#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */
1384#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */
1385#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */
1386#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */
1387#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */
1388#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */
1389#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */
1390#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */
1391#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */
1392#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */
1393#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */
1394#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */
1395#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */
1396#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */
1397#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */
1398#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */
1399#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */
1400#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */
1401#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */
1402#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */
1403#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */
1404/* end PSPA */
1405
1406/* inbound queue configuration offset - byte offset */
1407#define IB_PROPERITY_OFFSET 0x00
1408#define IB_BASE_ADDR_HI_OFFSET 0x04
1409#define IB_BASE_ADDR_LO_OFFSET 0x08
1410#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C
1411#define IB_CI_BASE_ADDR_LO_OFFSET 0x10
1412#define IB_PIPCI_BAR 0x14
1413#define IB_PIPCI_BAR_OFFSET 0x18
1414#define IB_RESERVED_OFFSET 0x1C
1415
1416/* outbound queue configuration offset - byte offset */
1417#define OB_PROPERITY_OFFSET 0x00
1418#define OB_BASE_ADDR_HI_OFFSET 0x04
1419#define OB_BASE_ADDR_LO_OFFSET 0x08
1420#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C
1421#define OB_PI_BASE_ADDR_LO_OFFSET 0x10
1422#define OB_CIPCI_BAR 0x14
1423#define OB_CIPCI_BAR_OFFSET 0x18
1424#define OB_INTERRUPT_COALES_OFFSET 0x1C
1425#define OB_DYNAMIC_COALES_OFFSET 0x20
1426#define OB_PROPERTY_INT_ENABLE 0x40000000
1427
1428#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418
1429#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418
1430/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
1431#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040
1432#define PCIE_EVENT_INTERRUPT 0x003044
1433#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048
1434#define PCIE_ERROR_INTERRUPT 0x00304C
1435
1436/* SPCV soft reset */
1437#define SPC_REG_SOFT_RESET 0x00001000
1438#define SPCv_NORMAL_RESET_VALUE 0x1
1439
1440#define SPCv_SOFT_RESET_READ_MASK 0xC0
1441#define SPCv_SOFT_RESET_NO_RESET 0x0
1442#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40
1443#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80
1444#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0
1445
1446/* signature definition for host scratch pad0 register */
1447#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd
1448/* Signature for Soft Reset */
1449
1450/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
1451#define SPC_REG_RESET 0x000000/* reset register */
1452
1453/* bit definition for SPC_RESET register */
1454#define SPC_REG_RESET_OSSP 0x00000001
1455#define SPC_REG_RESET_RAAE 0x00000002
1456#define SPC_REG_RESET_PCS_SPBC 0x00000004
1457#define SPC_REG_RESET_PCS_IOP_SS 0x00000008
1458#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010
1459#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020
1460#define SPC_REG_RESET_PCS_LM 0x00000040
1461#define SPC_REG_RESET_PCS 0x00000080
1462#define SPC_REG_RESET_GSM 0x00000100
1463#define SPC_REG_RESET_DDR2 0x00010000
1464#define SPC_REG_RESET_BDMA_CORE 0x00020000
1465#define SPC_REG_RESET_BDMA_SXCBI 0x00040000
1466#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000
1467#define SPC_REG_RESET_PCIE_PWR 0x00100000
1468#define SPC_REG_RESET_PCIE_SFT 0x00200000
1469#define SPC_REG_RESET_PCS_SXCBI 0x00400000
1470#define SPC_REG_RESET_LMS_SXCBI 0x00800000
1471#define SPC_REG_RESET_PMIC_SXCBI 0x01000000
1472#define SPC_REG_RESET_PMIC_CORE 0x02000000
1473#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000
1474#define SPC_REG_RESET_DEVICE 0x80000000
1475
1476/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
1477#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010
1478
1479#define MBIC_AAP1_ADDR_BASE 0x060000
1480#define MBIC_IOP_ADDR_BASE 0x070000
1481#define GSM_ADDR_BASE 0x0700000
1482/* Dynamic map through Bar4 - 0x00700000 */
1483#define GSM_CONFIG_RESET 0x00000000
1484#define RAM_ECC_DB_ERR 0x00000018
1485#define GSM_READ_ADDR_PARITY_INDIC 0x00000058
1486#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060
1487#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068
1488#define GSM_READ_ADDR_PARITY_CHECK 0x00000038
1489#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040
1490#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048
1491
1492#define RB6_ACCESS_REG 0x6A0000
1493#define HDAC_EXEC_CMD 0x0002
1494#define HDA_C_PA 0xcb
1495#define HDA_SEQ_ID_BITS 0x00ff0000
1496#define HDA_GSM_OFFSET_BITS 0x00FFFFFF
1497#define HDA_GSM_CMD_OFFSET_BITS 0x42C0
1498#define HDA_GSM_RSP_OFFSET_BITS 0x42E0
1499
/*
 * NOTE(review): MBIC_AAP1_ADDR_BASE, MBIC_IOP_ADDR_BASE and GSM_ADDR_BASE
 * below repeat earlier definitions in this file with identical values;
 * identical redefinition is legal and harmless, but one copy could be
 * dropped.
 */
#define MBIC_AAP1_ADDR_BASE 0x060000
#define MBIC_IOP_ADDR_BASE 0x070000
#define GSM_ADDR_BASE 0x0700000
1503#define SPC_TOP_LEVEL_ADDR_BASE 0x000000
1504#define GSM_CONFIG_RESET_VALUE 0x00003b00
1505#define GPIO_ADDR_BASE 0x00090000
1506#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c
1507
1508/* RB6 offset */
1509#define SPC_RB6_OFFSET 0x80C0
1510/* Magic number of soft reset for RB6 */
1511#define RB6_MAGIC_NUMBER_RST 0x1234
1512
1513/* Device Register status */
1514#define DEVREG_SUCCESS 0x00
1515#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01
1516#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02
1517#define DEVREG_FAILURE_INVALID_PHY_ID 0x03
1518#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04
1519#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05
1520#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
1521#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
1522
1523#endif
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 317a7fdc3b82..23d607218ae8 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -24,7 +24,9 @@ config SCSI_QLA_FC
24 24
25 Firmware images can be retrieved from: 25 Firmware images can be retrieved from:
26 26
27 ftp://ftp.qlogic.com/outgoing/linux/firmware/ 27 http://ldriver.qlogic.com/firmware/
28
29 They are also included in the linux-firmware tree as well.
28 30
29config TCM_QLA2XXX 31config TCM_QLA2XXX
30 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" 32 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 729b74389f83..937fed8cb038 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
3003 3003
3004 /* Set transfer direction */ 3004 /* Set transfer direction */
3005 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 3005 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
3006 lcmd_pkt->cntrl_flags = 3006 lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
3007 __constant_cpu_to_le16(TMF_WRITE_DATA);
3008 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 3007 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
3009 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 3008 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
3010 lcmd_pkt->cntrl_flags = 3009 lcmd_pkt->cntrl_flags = TMF_READ_DATA;
3011 __constant_cpu_to_le16(TMF_READ_DATA);
3012 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 3010 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
3013 } 3011 }
3014 3012
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5307bf86d5e0..ad72c1d85111 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
644 qla2x00_rel_sp(sp->fcport->vha, sp); 644 qla2x00_rel_sp(sp->fcport->vha, sp);
645} 645}
646 646
647void 647static void
648qla2x00_sp_compl(void *data, void *ptr, int res) 648qla2x00_sp_compl(void *data, void *ptr, int res)
649{ 649{
650 struct qla_hw_data *ha = (struct qla_hw_data *)data; 650 struct qla_hw_data *ha = (struct qla_hw_data *)data;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 14fec976f634..fad71ed067ec 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
507 mrb->mbox_cmd = in_mbox[0]; 507 mrb->mbox_cmd = in_mbox[0];
508 wmb(); 508 wmb();
509 509
510 ha->iocb_cnt += mrb->iocb_cnt;
510 ha->isp_ops->queue_iocb(ha); 511 ha->isp_ops->queue_iocb(ha);
511exit_mbox_iocb: 512exit_mbox_iocb:
512 spin_unlock_irqrestore(&ha->hardware_lock, flags); 513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a47f99957ba8..4d231c12463e 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2216 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); 2216 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2217 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); 2217 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2218 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); 2218 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2219 fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf); 2219 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2220 fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf); 2220 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2221 fw_ddb_entry->ipv4_tos = conn->ipv4_tos; 2221 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2222 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); 2222 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2223 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); 2223 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2224 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); 2224 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2225 fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn); 2225 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2226 fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn); 2226 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2227 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); 2227 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
2228 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); 2228 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2229 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); 2229 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
5504 * If this is invoked as a result of a userspace call then the entry is marked 5504 * If this is invoked as a result of a userspace call then the entry is marked
5505 * as nonpersistent using flash_state field. 5505 * as nonpersistent using flash_state field.
5506 **/ 5506 **/
5507int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, 5507static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
5508 struct dev_db_entry *fw_ddb_entry, 5508 struct dev_db_entry *fw_ddb_entry,
5509 uint16_t *idx, int user) 5509 uint16_t *idx, int user)
5510{ 5510{
5511 struct iscsi_bus_flash_session *fnode_sess = NULL; 5511 struct iscsi_bus_flash_session *fnode_sess = NULL;
5512 struct iscsi_bus_flash_conn *fnode_conn = NULL; 5512 struct iscsi_bus_flash_conn *fnode_conn = NULL;
@@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
5605 ql4_printk(KERN_ERR, ha, 5605 ql4_printk(KERN_ERR, ha,
5606 "%s: A non-persistent entry %s found\n", 5606 "%s: A non-persistent entry %s found\n",
5607 __func__, dev->kobj.name); 5607 __func__, dev->kobj.name);
5608 put_device(dev);
5608 goto exit_ddb_add; 5609 goto exit_ddb_add;
5609 } 5610 }
5610 5611
@@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6112 int parent_type, parent_index = 0xffff; 6113 int parent_type, parent_index = 0xffff;
6113 int rc = 0; 6114 int rc = 0;
6114 6115
6115 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 6116 dev = iscsi_find_flashnode_conn(fnode_sess);
6116 iscsi_is_flashnode_conn_dev);
6117 if (!dev) 6117 if (!dev)
6118 return -EIO; 6118 return -EIO;
6119 6119
@@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6276 rc = sprintf(buf, "\n"); 6276 rc = sprintf(buf, "\n");
6277 break; 6277 break;
6278 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: 6278 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6279 if ((fnode_sess->discovery_parent_idx) >= 0 && 6279 if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
6280 (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
6281 parent_index = fnode_sess->discovery_parent_idx; 6280 parent_index = fnode_sess->discovery_parent_idx;
6282 6281
6283 rc = sprintf(buf, "%u\n", parent_index); 6282 rc = sprintf(buf, "%u\n", parent_index);
@@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6287 parent_type = ISCSI_DISC_PARENT_ISNS; 6286 parent_type = ISCSI_DISC_PARENT_ISNS;
6288 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) 6287 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6289 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 6288 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6290 else if (fnode_sess->discovery_parent_type >= 0 && 6289 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6291 fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6292 parent_type = ISCSI_DISC_PARENT_SENDTGT; 6290 parent_type = ISCSI_DISC_PARENT_SENDTGT;
6293 else 6291 else
6294 parent_type = ISCSI_DISC_PARENT_UNKNOWN; 6292 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6349 rc = -ENOSYS; 6347 rc = -ENOSYS;
6350 break; 6348 break;
6351 } 6349 }
6350
6351 put_device(dev);
6352 return rc; 6352 return rc;
6353} 6353}
6354 6354
@@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6368{ 6368{
6369 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); 6369 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6370 struct scsi_qla_host *ha = to_qla_host(shost); 6370 struct scsi_qla_host *ha = to_qla_host(shost);
6371 struct dev_db_entry *fw_ddb_entry = NULL;
6372 struct iscsi_flashnode_param_info *fnode_param; 6371 struct iscsi_flashnode_param_info *fnode_param;
6373 struct nlattr *attr; 6372 struct nlattr *attr;
6374 int rc = QLA_ERROR; 6373 int rc = QLA_ERROR;
6375 uint32_t rem = len; 6374 uint32_t rem = len;
6376 6375
6377 fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
6378 if (!fw_ddb_entry) {
6379 DEBUG2(ql4_printk(KERN_ERR, ha,
6380 "%s: Unable to allocate ddb buffer\n",
6381 __func__));
6382 return -ENOMEM;
6383 }
6384
6385 nla_for_each_attr(attr, data, len, rem) { 6376 nla_for_each_attr(attr, data, len, rem) {
6386 fnode_param = nla_data(attr); 6377 fnode_param = nla_data(attr);
6387 6378
@@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6590 struct dev_db_entry *fw_ddb_entry = NULL; 6581 struct dev_db_entry *fw_ddb_entry = NULL;
6591 dma_addr_t fw_ddb_entry_dma; 6582 dma_addr_t fw_ddb_entry_dma;
6592 uint16_t *ddb_cookie = NULL; 6583 uint16_t *ddb_cookie = NULL;
6593 size_t ddb_size; 6584 size_t ddb_size = 0;
6594 void *pddb = NULL; 6585 void *pddb = NULL;
6595 int target_id; 6586 int target_id;
6596 int rc = 0; 6587 int rc = 0;
6597 6588
6598 if (!fnode_sess) {
6599 rc = -EINVAL;
6600 goto exit_ddb_del;
6601 }
6602
6603 if (fnode_sess->is_boot_target) { 6589 if (fnode_sess->is_boot_target) {
6604 rc = -EPERM; 6590 rc = -EPERM;
6605 DEBUG2(ql4_printk(KERN_ERR, ha, 6591 DEBUG2(ql4_printk(KERN_ERR, ha,
@@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6631 6617
6632 dev_db_start_offset += (fnode_sess->target_id * 6618 dev_db_start_offset += (fnode_sess->target_id *
6633 sizeof(*fw_ddb_entry)); 6619 sizeof(*fw_ddb_entry));
6634 dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) - 6620 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
6635 (void *)fw_ddb_entry;
6636 6621
6637 ddb_size = sizeof(*ddb_cookie); 6622 ddb_size = sizeof(*ddb_cookie);
6638 } 6623 }
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 83e0fec35d56..fe873cf7570d 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.03.00-k8" 8#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5add6f4e7928..0a537a0515ca 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1997,24 +1997,39 @@ out:
1997 return ret; 1997 return ret;
1998} 1998}
1999 1999
2000static unsigned int map_state(sector_t lba, unsigned int *num) 2000static unsigned long lba_to_map_index(sector_t lba)
2001{
2002 if (scsi_debug_unmap_alignment) {
2003 lba += scsi_debug_unmap_granularity -
2004 scsi_debug_unmap_alignment;
2005 }
2006 do_div(lba, scsi_debug_unmap_granularity);
2007
2008 return lba;
2009}
2010
2011static sector_t map_index_to_lba(unsigned long index)
2001{ 2012{
2002 unsigned int granularity, alignment, mapped; 2013 return index * scsi_debug_unmap_granularity -
2003 sector_t block, next, end; 2014 scsi_debug_unmap_alignment;
2015}
2004 2016
2005 granularity = scsi_debug_unmap_granularity; 2017static unsigned int map_state(sector_t lba, unsigned int *num)
2006 alignment = granularity - scsi_debug_unmap_alignment; 2018{
2007 block = lba + alignment; 2019 sector_t end;
2008 do_div(block, granularity); 2020 unsigned int mapped;
2021 unsigned long index;
2022 unsigned long next;
2009 2023
2010 mapped = test_bit(block, map_storep); 2024 index = lba_to_map_index(lba);
2025 mapped = test_bit(index, map_storep);
2011 2026
2012 if (mapped) 2027 if (mapped)
2013 next = find_next_zero_bit(map_storep, map_size, block); 2028 next = find_next_zero_bit(map_storep, map_size, index);
2014 else 2029 else
2015 next = find_next_bit(map_storep, map_size, block); 2030 next = find_next_bit(map_storep, map_size, index);
2016 2031
2017 end = next * granularity - scsi_debug_unmap_alignment; 2032 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2018 *num = end - lba; 2033 *num = end - lba;
2019 2034
2020 return mapped; 2035 return mapped;
@@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
2022 2037
2023static void map_region(sector_t lba, unsigned int len) 2038static void map_region(sector_t lba, unsigned int len)
2024{ 2039{
2025 unsigned int granularity, alignment;
2026 sector_t end = lba + len; 2040 sector_t end = lba + len;
2027 2041
2028 granularity = scsi_debug_unmap_granularity;
2029 alignment = granularity - scsi_debug_unmap_alignment;
2030
2031 while (lba < end) { 2042 while (lba < end) {
2032 sector_t block, rem; 2043 unsigned long index = lba_to_map_index(lba);
2033
2034 block = lba + alignment;
2035 rem = do_div(block, granularity);
2036 2044
2037 if (block < map_size) 2045 if (index < map_size)
2038 set_bit(block, map_storep); 2046 set_bit(index, map_storep);
2039 2047
2040 lba += granularity - rem; 2048 lba = map_index_to_lba(index + 1);
2041 } 2049 }
2042} 2050}
2043 2051
2044static void unmap_region(sector_t lba, unsigned int len) 2052static void unmap_region(sector_t lba, unsigned int len)
2045{ 2053{
2046 unsigned int granularity, alignment;
2047 sector_t end = lba + len; 2054 sector_t end = lba + len;
2048 2055
2049 granularity = scsi_debug_unmap_granularity;
2050 alignment = granularity - scsi_debug_unmap_alignment;
2051
2052 while (lba < end) { 2056 while (lba < end) {
2053 sector_t block, rem; 2057 unsigned long index = lba_to_map_index(lba);
2054
2055 block = lba + alignment;
2056 rem = do_div(block, granularity);
2057 2058
2058 if (rem == 0 && lba + granularity < end && block < map_size) { 2059 if (lba == map_index_to_lba(index) &&
2059 clear_bit(block, map_storep); 2060 lba + scsi_debug_unmap_granularity <= end &&
2060 if (scsi_debug_lbprz) 2061 index < map_size) {
2062 clear_bit(index, map_storep);
2063 if (scsi_debug_lbprz) {
2061 memset(fake_storep + 2064 memset(fake_storep +
2062 block * scsi_debug_sector_size, 0, 2065 lba * scsi_debug_sector_size, 0,
2063 scsi_debug_sector_size); 2066 scsi_debug_sector_size *
2067 scsi_debug_unmap_granularity);
2068 }
2064 } 2069 }
2065 lba += granularity - rem; 2070 lba = map_index_to_lba(index + 1);
2066 } 2071 }
2067} 2072}
2068 2073
@@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2089 2094
2090 write_lock_irqsave(&atomic_rw, iflags); 2095 write_lock_irqsave(&atomic_rw, iflags);
2091 ret = do_device_access(SCpnt, devip, lba, num, 1); 2096 ret = do_device_access(SCpnt, devip, lba, num, 1);
2092 if (scsi_debug_unmap_granularity) 2097 if (scsi_debug_lbp())
2093 map_region(lba, num); 2098 map_region(lba, num);
2094 write_unlock_irqrestore(&atomic_rw, iflags); 2099 write_unlock_irqrestore(&atomic_rw, iflags);
2095 if (-1 == ret) 2100 if (-1 == ret)
@@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2122 2127
2123 write_lock_irqsave(&atomic_rw, iflags); 2128 write_lock_irqsave(&atomic_rw, iflags);
2124 2129
2125 if (unmap && scsi_debug_unmap_granularity) { 2130 if (unmap && scsi_debug_lbp()) {
2126 unmap_region(lba, num); 2131 unmap_region(lba, num);
2127 goto out; 2132 goto out;
2128 } 2133 }
@@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2146 fake_storep + (lba * scsi_debug_sector_size), 2151 fake_storep + (lba * scsi_debug_sector_size),
2147 scsi_debug_sector_size); 2152 scsi_debug_sector_size);
2148 2153
2149 if (scsi_debug_unmap_granularity) 2154 if (scsi_debug_lbp())
2150 map_region(lba, num); 2155 map_region(lba, num);
2151out: 2156out:
2152 write_unlock_irqrestore(&atomic_rw, iflags); 2157 write_unlock_irqrestore(&atomic_rw, iflags);
@@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void)
3389 3394
3390 /* Logical Block Provisioning */ 3395 /* Logical Block Provisioning */
3391 if (scsi_debug_lbp()) { 3396 if (scsi_debug_lbp()) {
3392 unsigned int map_bytes;
3393
3394 scsi_debug_unmap_max_blocks = 3397 scsi_debug_unmap_max_blocks =
3395 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); 3398 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3396 3399
@@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void)
3401 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); 3404 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3402 3405
3403 if (scsi_debug_unmap_alignment && 3406 if (scsi_debug_unmap_alignment &&
3404 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { 3407 scsi_debug_unmap_granularity <=
3408 scsi_debug_unmap_alignment) {
3405 printk(KERN_ERR 3409 printk(KERN_ERR
3406 "%s: ERR: unmap_granularity < unmap_alignment\n", 3410 "%s: ERR: unmap_granularity <= unmap_alignment\n",
3407 __func__); 3411 __func__);
3408 return -EINVAL; 3412 return -EINVAL;
3409 } 3413 }
3410 3414
3411 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); 3415 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3412 map_bytes = map_size >> 3; 3416 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3413 map_storep = vmalloc(map_bytes);
3414 3417
3415 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", 3418 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3416 map_size); 3419 map_size);
@@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void)
3421 goto free_vm; 3424 goto free_vm;
3422 } 3425 }
3423 3426
3424 memset(map_storep, 0x0, map_bytes); 3427 bitmap_zero(map_storep, map_size);
3425 3428
3426 /* Map first 1KB for partition table */ 3429 /* Map first 1KB for partition table */
3427 if (scsi_debug_num_parts) 3430 if (scsi_debug_num_parts)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c1b05a83d403..f43de1e56420 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -25,6 +25,7 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/jiffies.h>
28 29
29#include <scsi/scsi.h> 30#include <scsi/scsi.h>
30#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
@@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
791 struct scsi_device *sdev = scmd->device; 792 struct scsi_device *sdev = scmd->device;
792 struct Scsi_Host *shost = sdev->host; 793 struct Scsi_Host *shost = sdev->host;
793 DECLARE_COMPLETION_ONSTACK(done); 794 DECLARE_COMPLETION_ONSTACK(done);
794 unsigned long timeleft; 795 unsigned long timeleft = timeout;
795 struct scsi_eh_save ses; 796 struct scsi_eh_save ses;
797 const unsigned long stall_for = msecs_to_jiffies(100);
796 int rtn; 798 int rtn;
797 799
800retry:
798 scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); 801 scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
799 shost->eh_action = &done; 802 shost->eh_action = &done;
800 803
801 scsi_log_send(scmd); 804 scsi_log_send(scmd);
802 scmd->scsi_done = scsi_eh_done; 805 scmd->scsi_done = scsi_eh_done;
803 shost->hostt->queuecommand(shost, scmd); 806 rtn = shost->hostt->queuecommand(shost, scmd);
804 807 if (rtn) {
805 timeleft = wait_for_completion_timeout(&done, timeout); 808 if (timeleft > stall_for) {
809 scsi_eh_restore_cmnd(scmd, &ses);
810 timeleft -= stall_for;
811 msleep(jiffies_to_msecs(stall_for));
812 goto retry;
813 }
814 /* signal not to enter either branch of the if () below */
815 timeleft = 0;
816 rtn = NEEDS_RETRY;
817 } else {
818 timeleft = wait_for_completion_timeout(&done, timeout);
819 }
806 820
807 shost->eh_action = NULL; 821 shost->eh_action = NULL;
808 822
809 scsi_log_completion(scmd, SUCCESS); 823 scsi_log_completion(scmd, rtn);
810 824
811 SCSI_LOG_ERROR_RECOVERY(3, 825 SCSI_LOG_ERROR_RECOVERY(3,
812 printk("%s: scmd: %p, timeleft: %ld\n", 826 printk("%s: scmd: %p, timeleft: %ld\n",
813 __func__, scmd, timeleft)); 827 __func__, scmd, timeleft));
814 828
815 /* 829 /*
816 * If there is time left scsi_eh_done got called, and we will 830 * If there is time left scsi_eh_done got called, and we will examine
817 * examine the actual status codes to see whether the command 831 * the actual status codes to see whether the command actually did
818 * actually did complete normally, else tell the host to forget 832 * complete normally, else if we have a zero return and no time left,
819 * about this command. 833 * the command must still be pending, so abort it and return FAILED.
834 * If we never actually managed to issue the command, because
835 * ->queuecommand() kept returning non zero, use the rtn = FAILED
836 * value above (so don't execute either branch of the if)
820 */ 837 */
821 if (timeleft) { 838 if (timeleft) {
822 rtn = scsi_eh_completed_normally(scmd); 839 rtn = scsi_eh_completed_normally(scmd);
@@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
837 rtn = FAILED; 854 rtn = FAILED;
838 break; 855 break;
839 } 856 }
840 } else { 857 } else if (!rtn) {
841 scsi_abort_eh_cmnd(scmd); 858 scsi_abort_eh_cmnd(scmd);
842 rtn = FAILED; 859 rtn = FAILED;
843 } 860 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c31187d79343..86d522004a20 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
276} 276}
277EXPORT_SYMBOL(scsi_execute); 277EXPORT_SYMBOL(scsi_execute);
278 278
279 279int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
280int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
281 int data_direction, void *buffer, unsigned bufflen, 280 int data_direction, void *buffer, unsigned bufflen,
282 struct scsi_sense_hdr *sshdr, int timeout, int retries, 281 struct scsi_sense_hdr *sshdr, int timeout, int retries,
283 int *resid) 282 int *resid, int flags)
284{ 283{
285 char *sense = NULL; 284 char *sense = NULL;
286 int result; 285 int result;
@@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
291 return DRIVER_ERROR << 24; 290 return DRIVER_ERROR << 24;
292 } 291 }
293 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 292 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
294 sense, timeout, retries, 0, resid); 293 sense, timeout, retries, flags, resid);
295 if (sshdr) 294 if (sshdr)
296 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 295 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
297 296
298 kfree(sense); 297 kfree(sense);
299 return result; 298 return result;
300} 299}
301EXPORT_SYMBOL(scsi_execute_req); 300EXPORT_SYMBOL(scsi_execute_req_flags);
302 301
303/* 302/*
304 * Function: scsi_init_cmd_errh() 303 * Function: scsi_init_cmd_errh()
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 8f6b12cbd224..42539ee2cb11 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev)
144 144
145#ifdef CONFIG_PM_RUNTIME 145#ifdef CONFIG_PM_RUNTIME
146 146
147static int sdev_blk_runtime_suspend(struct scsi_device *sdev,
148 int (*cb)(struct device *))
149{
150 int err;
151
152 err = blk_pre_runtime_suspend(sdev->request_queue);
153 if (err)
154 return err;
155 if (cb)
156 err = cb(&sdev->sdev_gendev);
157 blk_post_runtime_suspend(sdev->request_queue, err);
158
159 return err;
160}
161
162static int sdev_runtime_suspend(struct device *dev)
163{
164 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
165 int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL;
166 struct scsi_device *sdev = to_scsi_device(dev);
167 int err;
168
169 if (sdev->request_queue->dev)
170 return sdev_blk_runtime_suspend(sdev, cb);
171
172 err = scsi_dev_type_suspend(dev, cb);
173 if (err == -EAGAIN)
174 pm_schedule_suspend(dev, jiffies_to_msecs(
175 round_jiffies_up_relative(HZ/10)));
176 return err;
177}
178
147static int scsi_runtime_suspend(struct device *dev) 179static int scsi_runtime_suspend(struct device *dev)
148{ 180{
149 int err = 0; 181 int err = 0;
150 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
151 182
152 dev_dbg(dev, "scsi_runtime_suspend\n"); 183 dev_dbg(dev, "scsi_runtime_suspend\n");
153 if (scsi_is_sdev_device(dev)) { 184 if (scsi_is_sdev_device(dev))
154 err = scsi_dev_type_suspend(dev, 185 err = sdev_runtime_suspend(dev);
155 pm ? pm->runtime_suspend : NULL);
156 if (err == -EAGAIN)
157 pm_schedule_suspend(dev, jiffies_to_msecs(
158 round_jiffies_up_relative(HZ/10)));
159 }
160 186
161 /* Insert hooks here for targets, hosts, and transport classes */ 187 /* Insert hooks here for targets, hosts, and transport classes */
162 188
163 return err; 189 return err;
164} 190}
165 191
166static int scsi_runtime_resume(struct device *dev) 192static int sdev_blk_runtime_resume(struct scsi_device *sdev,
193 int (*cb)(struct device *))
167{ 194{
168 int err = 0; 195 int err = 0;
196
197 blk_pre_runtime_resume(sdev->request_queue);
198 if (cb)
199 err = cb(&sdev->sdev_gendev);
200 blk_post_runtime_resume(sdev->request_queue, err);
201
202 return err;
203}
204
205static int sdev_runtime_resume(struct device *dev)
206{
207 struct scsi_device *sdev = to_scsi_device(dev);
169 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 208 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
209 int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL;
210
211 if (sdev->request_queue->dev)
212 return sdev_blk_runtime_resume(sdev, cb);
213 else
214 return scsi_dev_type_resume(dev, cb);
215}
216
217static int scsi_runtime_resume(struct device *dev)
218{
219 int err = 0;
170 220
171 dev_dbg(dev, "scsi_runtime_resume\n"); 221 dev_dbg(dev, "scsi_runtime_resume\n");
172 if (scsi_is_sdev_device(dev)) 222 if (scsi_is_sdev_device(dev))
173 err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL); 223 err = sdev_runtime_resume(dev);
174 224
175 /* Insert hooks here for targets, hosts, and transport classes */ 225 /* Insert hooks here for targets, hosts, and transport classes */
176 226
@@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev)
185 235
186 /* Insert hooks here for targets, hosts, and transport classes */ 236 /* Insert hooks here for targets, hosts, and transport classes */
187 237
188 if (scsi_is_sdev_device(dev)) 238 if (scsi_is_sdev_device(dev)) {
189 err = pm_schedule_suspend(dev, 100); 239 struct scsi_device *sdev = to_scsi_device(dev);
190 else 240
241 if (sdev->request_queue->dev) {
242 pm_runtime_mark_last_busy(dev);
243 err = pm_runtime_autosuspend(dev);
244 } else {
245 err = pm_runtime_suspend(dev);
246 }
247 } else {
191 err = pm_runtime_suspend(dev); 248 err = pm_runtime_suspend(dev);
249 }
192 return err; 250 return err;
193} 251}
194 252
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 47799a33d6ca..133926b1bb78 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1019,8 +1019,7 @@ exit_match_index:
1019/** 1019/**
1020 * iscsi_get_flashnode_by_index -finds flashnode session entry by index 1020 * iscsi_get_flashnode_by_index -finds flashnode session entry by index
1021 * @shost: pointer to host data 1021 * @shost: pointer to host data
1022 * @data: pointer to data containing value to use for comparison 1022 * @idx: index to match
1023 * @fn: function pointer that does actual comparison
1024 * 1023 *
1025 * Finds the flashnode session object for the passed index 1024 * Finds the flashnode session object for the passed index
1026 * 1025 *
@@ -1029,13 +1028,13 @@ exit_match_index:
1029 * %NULL on failure 1028 * %NULL on failure
1030 */ 1029 */
1031static struct iscsi_bus_flash_session * 1030static struct iscsi_bus_flash_session *
1032iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data, 1031iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
1033 int (*fn)(struct device *dev, void *data))
1034{ 1032{
1035 struct iscsi_bus_flash_session *fnode_sess = NULL; 1033 struct iscsi_bus_flash_session *fnode_sess = NULL;
1036 struct device *dev; 1034 struct device *dev;
1037 1035
1038 dev = device_find_child(&shost->shost_gendev, data, fn); 1036 dev = device_find_child(&shost->shost_gendev, &idx,
1037 flashnode_match_index);
1039 if (dev) 1038 if (dev)
1040 fnode_sess = iscsi_dev_to_flash_session(dev); 1039 fnode_sess = iscsi_dev_to_flash_session(dev);
1041 1040
@@ -1059,18 +1058,13 @@ struct device *
1059iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, 1058iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
1060 int (*fn)(struct device *dev, void *data)) 1059 int (*fn)(struct device *dev, void *data))
1061{ 1060{
1062 struct device *dev; 1061 return device_find_child(&shost->shost_gendev, data, fn);
1063
1064 dev = device_find_child(&shost->shost_gendev, data, fn);
1065 return dev;
1066} 1062}
1067EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); 1063EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
1068 1064
1069/** 1065/**
1070 * iscsi_find_flashnode_conn - finds flashnode connection entry 1066 * iscsi_find_flashnode_conn - finds flashnode connection entry
1071 * @fnode_sess: pointer to parent flashnode session entry 1067 * @fnode_sess: pointer to parent flashnode session entry
1072 * @data: pointer to data containing value to use for comparison
1073 * @fn: function pointer that does actual comparison
1074 * 1068 *
1075 * Finds the flashnode connection object comparing the data passed using logic 1069 * Finds the flashnode connection object comparing the data passed using logic
1076 * defined in passed function pointer 1070 * defined in passed function pointer
@@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
1080 * %NULL on failure 1074 * %NULL on failure
1081 */ 1075 */
1082struct device * 1076struct device *
1083iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, 1077iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
1084 void *data,
1085 int (*fn)(struct device *dev, void *data))
1086{ 1078{
1087 struct device *dev; 1079 return device_find_child(&fnode_sess->dev, NULL,
1088 1080 iscsi_is_flashnode_conn_dev);
1089 dev = device_find_child(&fnode_sess->dev, data, fn);
1090 return dev;
1091} 1081}
1092EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); 1082EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
1093 1083
@@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
2808 struct iscsi_bus_flash_session *fnode_sess; 2798 struct iscsi_bus_flash_session *fnode_sess;
2809 struct iscsi_bus_flash_conn *fnode_conn; 2799 struct iscsi_bus_flash_conn *fnode_conn;
2810 struct device *dev; 2800 struct device *dev;
2811 uint32_t *idx; 2801 uint32_t idx;
2812 int err = 0; 2802 int err = 0;
2813 2803
2814 if (!transport->set_flashnode_param) { 2804 if (!transport->set_flashnode_param) {
@@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
2824 goto put_host; 2814 goto put_host;
2825 } 2815 }
2826 2816
2827 idx = &ev->u.set_flashnode.flashnode_idx; 2817 idx = ev->u.set_flashnode.flashnode_idx;
2828 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2818 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
2829 flashnode_match_index);
2830 if (!fnode_sess) { 2819 if (!fnode_sess) {
2831 pr_err("%s could not find flashnode %u for host no %u\n", 2820 pr_err("%s could not find flashnode %u for host no %u\n",
2832 __func__, *idx, ev->u.set_flashnode.host_no); 2821 __func__, idx, ev->u.set_flashnode.host_no);
2833 err = -ENODEV; 2822 err = -ENODEV;
2834 goto put_host; 2823 goto put_host;
2835 } 2824 }
2836 2825
2837 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 2826 dev = iscsi_find_flashnode_conn(fnode_sess);
2838 iscsi_is_flashnode_conn_dev);
2839 if (!dev) { 2827 if (!dev) {
2840 err = -ENODEV; 2828 err = -ENODEV;
2841 goto put_host; 2829 goto put_sess;
2842 } 2830 }
2843 2831
2844 fnode_conn = iscsi_dev_to_flash_conn(dev); 2832 fnode_conn = iscsi_dev_to_flash_conn(dev);
2845 err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); 2833 err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
2834 put_device(dev);
2835
2836put_sess:
2837 put_device(&fnode_sess->dev);
2846 2838
2847put_host: 2839put_host:
2848 scsi_host_put(shost); 2840 scsi_host_put(shost);
@@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
2891{ 2883{
2892 struct Scsi_Host *shost; 2884 struct Scsi_Host *shost;
2893 struct iscsi_bus_flash_session *fnode_sess; 2885 struct iscsi_bus_flash_session *fnode_sess;
2894 uint32_t *idx; 2886 uint32_t idx;
2895 int err = 0; 2887 int err = 0;
2896 2888
2897 if (!transport->del_flashnode) { 2889 if (!transport->del_flashnode) {
@@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
2907 goto put_host; 2899 goto put_host;
2908 } 2900 }
2909 2901
2910 idx = &ev->u.del_flashnode.flashnode_idx; 2902 idx = ev->u.del_flashnode.flashnode_idx;
2911 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2903 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
2912 flashnode_match_index);
2913 if (!fnode_sess) { 2904 if (!fnode_sess) {
2914 pr_err("%s could not find flashnode %u for host no %u\n", 2905 pr_err("%s could not find flashnode %u for host no %u\n",
2915 __func__, *idx, ev->u.del_flashnode.host_no); 2906 __func__, idx, ev->u.del_flashnode.host_no);
2916 err = -ENODEV; 2907 err = -ENODEV;
2917 goto put_host; 2908 goto put_host;
2918 } 2909 }
2919 2910
2920 err = transport->del_flashnode(fnode_sess); 2911 err = transport->del_flashnode(fnode_sess);
2912 put_device(&fnode_sess->dev);
2921 2913
2922put_host: 2914put_host:
2923 scsi_host_put(shost); 2915 scsi_host_put(shost);
@@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
2933 struct iscsi_bus_flash_session *fnode_sess; 2925 struct iscsi_bus_flash_session *fnode_sess;
2934 struct iscsi_bus_flash_conn *fnode_conn; 2926 struct iscsi_bus_flash_conn *fnode_conn;
2935 struct device *dev; 2927 struct device *dev;
2936 uint32_t *idx; 2928 uint32_t idx;
2937 int err = 0; 2929 int err = 0;
2938 2930
2939 if (!transport->login_flashnode) { 2931 if (!transport->login_flashnode) {
@@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
2949 goto put_host; 2941 goto put_host;
2950 } 2942 }
2951 2943
2952 idx = &ev->u.login_flashnode.flashnode_idx; 2944 idx = ev->u.login_flashnode.flashnode_idx;
2953 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2945 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
2954 flashnode_match_index);
2955 if (!fnode_sess) { 2946 if (!fnode_sess) {
2956 pr_err("%s could not find flashnode %u for host no %u\n", 2947 pr_err("%s could not find flashnode %u for host no %u\n",
2957 __func__, *idx, ev->u.login_flashnode.host_no); 2948 __func__, idx, ev->u.login_flashnode.host_no);
2958 err = -ENODEV; 2949 err = -ENODEV;
2959 goto put_host; 2950 goto put_host;
2960 } 2951 }
2961 2952
2962 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 2953 dev = iscsi_find_flashnode_conn(fnode_sess);
2963 iscsi_is_flashnode_conn_dev);
2964 if (!dev) { 2954 if (!dev) {
2965 err = -ENODEV; 2955 err = -ENODEV;
2966 goto put_host; 2956 goto put_sess;
2967 } 2957 }
2968 2958
2969 fnode_conn = iscsi_dev_to_flash_conn(dev); 2959 fnode_conn = iscsi_dev_to_flash_conn(dev);
2970 err = transport->login_flashnode(fnode_sess, fnode_conn); 2960 err = transport->login_flashnode(fnode_sess, fnode_conn);
2961 put_device(dev);
2962
2963put_sess:
2964 put_device(&fnode_sess->dev);
2971 2965
2972put_host: 2966put_host:
2973 scsi_host_put(shost); 2967 scsi_host_put(shost);
@@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
2983 struct iscsi_bus_flash_session *fnode_sess; 2977 struct iscsi_bus_flash_session *fnode_sess;
2984 struct iscsi_bus_flash_conn *fnode_conn; 2978 struct iscsi_bus_flash_conn *fnode_conn;
2985 struct device *dev; 2979 struct device *dev;
2986 uint32_t *idx; 2980 uint32_t idx;
2987 int err = 0; 2981 int err = 0;
2988 2982
2989 if (!transport->logout_flashnode) { 2983 if (!transport->logout_flashnode) {
@@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
2999 goto put_host; 2993 goto put_host;
3000 } 2994 }
3001 2995
3002 idx = &ev->u.logout_flashnode.flashnode_idx; 2996 idx = ev->u.logout_flashnode.flashnode_idx;
3003 fnode_sess = iscsi_get_flashnode_by_index(shost, idx, 2997 fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
3004 flashnode_match_index);
3005 if (!fnode_sess) { 2998 if (!fnode_sess) {
3006 pr_err("%s could not find flashnode %u for host no %u\n", 2999 pr_err("%s could not find flashnode %u for host no %u\n",
3007 __func__, *idx, ev->u.logout_flashnode.host_no); 3000 __func__, idx, ev->u.logout_flashnode.host_no);
3008 err = -ENODEV; 3001 err = -ENODEV;
3009 goto put_host; 3002 goto put_host;
3010 } 3003 }
3011 3004
3012 dev = iscsi_find_flashnode_conn(fnode_sess, NULL, 3005 dev = iscsi_find_flashnode_conn(fnode_sess);
3013 iscsi_is_flashnode_conn_dev);
3014 if (!dev) { 3006 if (!dev) {
3015 err = -ENODEV; 3007 err = -ENODEV;
3016 goto put_host; 3008 goto put_sess;
3017 } 3009 }
3018 3010
3019 fnode_conn = iscsi_dev_to_flash_conn(dev); 3011 fnode_conn = iscsi_dev_to_flash_conn(dev);
3020 3012
3021 err = transport->logout_flashnode(fnode_sess, fnode_conn); 3013 err = transport->logout_flashnode(fnode_sess, fnode_conn);
3014 put_device(dev);
3015
3016put_sess:
3017 put_device(&fnode_sess->dev);
3022 3018
3023put_host: 3019put_host:
3024 scsi_host_put(shost); 3020 scsi_host_put(shost);
@@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void)
3985 } 3981 }
3986 3982
3987 iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); 3983 iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
3988 if (!iscsi_eh_timer_workq) 3984 if (!iscsi_eh_timer_workq) {
3985 err = -ENOMEM;
3989 goto release_nls; 3986 goto release_nls;
3987 }
3990 3988
3991 return 0; 3989 return 0;
3992 3990
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e6689776b4f6..c1c555242d0d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
142 char *buffer_data; 142 char *buffer_data;
143 struct scsi_mode_data data; 143 struct scsi_mode_data data;
144 struct scsi_sense_hdr sshdr; 144 struct scsi_sense_hdr sshdr;
145 const char *temp = "temporary ";
145 int len; 146 int len;
146 147
147 if (sdp->type != TYPE_DISK) 148 if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
150 * it's not worth the risk */ 151 * it's not worth the risk */
151 return -EINVAL; 152 return -EINVAL;
152 153
154 if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
155 buf += sizeof(temp) - 1;
156 sdkp->cache_override = 1;
157 } else {
158 sdkp->cache_override = 0;
159 }
160
153 for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { 161 for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
154 len = strlen(sd_cache_types[i]); 162 len = strlen(sd_cache_types[i]);
155 if (strncmp(sd_cache_types[i], buf, len) == 0 && 163 if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
162 return -EINVAL; 170 return -EINVAL;
163 rcd = ct & 0x01 ? 1 : 0; 171 rcd = ct & 0x01 ? 1 : 0;
164 wce = ct & 0x02 ? 1 : 0; 172 wce = ct & 0x02 ? 1 : 0;
173
174 if (sdkp->cache_override) {
175 sdkp->WCE = wce;
176 sdkp->RCD = rcd;
177 return count;
178 }
179
165 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, 180 if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
166 SD_MAX_RETRIES, &data, NULL)) 181 SD_MAX_RETRIES, &data, NULL))
167 return -EINVAL; 182 return -EINVAL;
@@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
1121 1136
1122 sdev = sdkp->device; 1137 sdev = sdkp->device;
1123 1138
1124 retval = scsi_autopm_get_device(sdev);
1125 if (retval)
1126 goto error_autopm;
1127
1128 /* 1139 /*
1129 * If the device is in error recovery, wait until it is done. 1140 * If the device is in error recovery, wait until it is done.
1130 * If the device is offline, then disallow any access to it. 1141 * If the device is offline, then disallow any access to it.
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
1169 return 0; 1180 return 0;
1170 1181
1171error_out: 1182error_out:
1172 scsi_autopm_put_device(sdev);
1173error_autopm:
1174 scsi_disk_put(sdkp); 1183 scsi_disk_put(sdkp);
1175 return retval; 1184 return retval;
1176} 1185}
@@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
1205 * XXX is followed by a "rmmod sd_mod"? 1214 * XXX is followed by a "rmmod sd_mod"?
1206 */ 1215 */
1207 1216
1208 scsi_autopm_put_device(sdev);
1209 scsi_disk_put(sdkp); 1217 scsi_disk_put(sdkp);
1210} 1218}
1211 1219
@@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1366 retval = -ENODEV; 1374 retval = -ENODEV;
1367 1375
1368 if (scsi_block_when_processing_errors(sdp)) { 1376 if (scsi_block_when_processing_errors(sdp)) {
1369 retval = scsi_autopm_get_device(sdp);
1370 if (retval)
1371 goto out;
1372
1373 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 1377 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1374 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, 1378 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
1375 sshdr); 1379 sshdr);
1376 scsi_autopm_put_device(sdp);
1377 } 1380 }
1378 1381
1379 /* failed to execute TUR, assume media not present */ 1382 /* failed to execute TUR, assume media not present */
@@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1423 * Leave the rest of the command zero to indicate 1426 * Leave the rest of the command zero to indicate
1424 * flush everything. 1427 * flush everything.
1425 */ 1428 */
1426 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 1429 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
1427 SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); 1430 &sshdr, SD_FLUSH_TIMEOUT,
1431 SD_MAX_RETRIES, NULL, REQ_PM);
1428 if (res == 0) 1432 if (res == 0)
1429 break; 1433 break;
1430 } 1434 }
@@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2318 int old_rcd = sdkp->RCD; 2322 int old_rcd = sdkp->RCD;
2319 int old_dpofua = sdkp->DPOFUA; 2323 int old_dpofua = sdkp->DPOFUA;
2320 2324
2325
2326 if (sdkp->cache_override)
2327 return;
2328
2321 first_len = 4; 2329 first_len = 4;
2322 if (sdp->skip_ms_page_8) { 2330 if (sdp->skip_ms_page_8) {
2323 if (sdp->type == TYPE_RBC) 2331 if (sdp->type == TYPE_RBC)
@@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2811 sdkp->capacity = 0; 2819 sdkp->capacity = 0;
2812 sdkp->media_present = 1; 2820 sdkp->media_present = 1;
2813 sdkp->write_prot = 0; 2821 sdkp->write_prot = 0;
2822 sdkp->cache_override = 0;
2814 sdkp->WCE = 0; 2823 sdkp->WCE = 0;
2815 sdkp->RCD = 0; 2824 sdkp->RCD = 0;
2816 sdkp->ATO = 0; 2825 sdkp->ATO = 0;
@@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2837 2846
2838 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2847 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2839 sdp->removable ? "removable " : ""); 2848 sdp->removable ? "removable " : "");
2849 blk_pm_runtime_init(sdp->request_queue, dev);
2840 scsi_autopm_put_device(sdp); 2850 scsi_autopm_put_device(sdp);
2841 put_device(&sdkp->dev); 2851 put_device(&sdkp->dev);
2842} 2852}
@@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3020 if (!scsi_device_online(sdp)) 3030 if (!scsi_device_online(sdp))
3021 return -ENODEV; 3031 return -ENODEV;
3022 3032
3023 res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 3033 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
3024 SD_TIMEOUT, SD_MAX_RETRIES, NULL); 3034 SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
3025 if (res) { 3035 if (res) {
3026 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); 3036 sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
3027 sd_print_result(sdkp, res); 3037 sd_print_result(sdkp, res);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 74a1e4ca5401..2386aeb41fe8 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -73,6 +73,7 @@ struct scsi_disk {
73 u8 protection_type;/* Data Integrity Field */ 73 u8 protection_type;/* Data Integrity Field */
74 u8 provisioning_mode; 74 u8 provisioning_mode;
75 unsigned ATO : 1; /* state of disk ATO bit */ 75 unsigned ATO : 1; /* state of disk ATO bit */
76 unsigned cache_override : 1; /* temp override of WCE,RCD */
76 unsigned WCE : 1; /* state of disk WCE bit */ 77 unsigned WCE : 1; /* state of disk WCE bit */
77 unsigned RCD : 1; /* state of disk RCD bit, unused */ 78 unsigned RCD : 1; /* state of disk RCD bit, unused */
78 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 79 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 04998f36e507..6174ca4ea275 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
93 if (sdt->app_tag == 0xffff) 93 if (sdt->app_tag == 0xffff)
94 return 0; 94 return 0;
95 95
96 /* Bad ref tag received from disk */
97 if (sdt->ref_tag == 0xffffffff) {
98 printk(KERN_ERR
99 "%s: bad phys ref tag on sector %lu\n",
100 bix->disk_name, (unsigned long)sector);
101 return -EIO;
102 }
103
104 if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 96 if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
105 printk(KERN_ERR 97 printk(KERN_ERR
106 "%s: ref tag error on sector %lu (rcvd %u)\n", 98 "%s: ref tag error on sector %lu (rcvd %u)\n",
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 0371047c5922..35faf24c6044 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI
57 If you have a controller with this interface, say Y or M here. 57 If you have a controller with this interface, say Y or M here.
58 58
59 If unsure, say N. 59 If unsure, say N.
60
61config SCSI_UFSHCD_PLATFORM
62 tristate "Platform bus based UFS Controller support"
63 depends on SCSI_UFSHCD
64 ---help---
65 This selects the UFS host controller support. Select this if
66 you have an UFS controller on Platform bus.
67
68 If you have a controller with this interface, say Y or M here.
69
70 If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 9eda0dfbd6df..1e5bd48457d6 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,3 +1,4 @@
1# UFSHCD makefile 1# UFSHCD makefile
2obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o 2obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
3obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o 3obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
4obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644
index 000000000000..03319acd9c72
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -0,0 +1,217 @@
1/*
2 * Universal Flash Storage Host controller Platform bus based glue driver
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 * See the COPYING file in the top-level directory or visit
16 * <http://www.gnu.org/licenses/gpl-2.0.html>
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * This program is provided "AS IS" and "WITH ALL FAULTS" and
24 * without warranty of any kind. You are solely responsible for
25 * determining the appropriateness of using and distributing
26 * the program and assume all risks associated with your exercise
27 * of rights with respect to the program, including but not limited
28 * to infringement of third party rights, the risks and costs of
29 * program errors, damage to or loss of data, programs or equipment,
30 * and unavailability or interruption of operations. Under no
31 * circumstances will the contributor of this Program be liable for
32 * any damages of any kind arising from your use or distribution of
33 * this program.
34 */
35
36#include "ufshcd.h"
37#include <linux/platform_device.h>
38
39#ifdef CONFIG_PM
40/**
41 * ufshcd_pltfrm_suspend - suspend power management function
42 * @dev: pointer to device handle
43 *
44 *
45 * Returns 0
46 */
47static int ufshcd_pltfrm_suspend(struct device *dev)
48{
49 struct platform_device *pdev = to_platform_device(dev);
50 struct ufs_hba *hba = platform_get_drvdata(pdev);
51
52 /*
53 * TODO:
54 * 1. Call ufshcd_suspend
55 * 2. Do bus specific power management
56 */
57
58 disable_irq(hba->irq);
59
60 return 0;
61}
62
63/**
64 * ufshcd_pltfrm_resume - resume power management function
65 * @dev: pointer to device handle
66 *
67 * Returns 0
68 */
69static int ufshcd_pltfrm_resume(struct device *dev)
70{
71 struct platform_device *pdev = to_platform_device(dev);
72 struct ufs_hba *hba = platform_get_drvdata(pdev);
73
74 /*
75 * TODO:
76 * 1. Call ufshcd_resume.
77 * 2. Do bus specific wake up
78 */
79
80 enable_irq(hba->irq);
81
82 return 0;
83}
84#else
85#define ufshcd_pltfrm_suspend NULL
86#define ufshcd_pltfrm_resume NULL
87#endif
88
89/**
90 * ufshcd_pltfrm_probe - probe routine of the driver
91 * @pdev: pointer to Platform device handle
92 *
93 * Returns 0 on success, non-zero value on failure
94 */
95static int ufshcd_pltfrm_probe(struct platform_device *pdev)
96{
97 struct ufs_hba *hba;
98 void __iomem *mmio_base;
99 struct resource *mem_res;
100 struct resource *irq_res;
101 resource_size_t mem_size;
102 int err;
103 struct device *dev = &pdev->dev;
104
105 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
106 if (!mem_res) {
107 dev_err(&pdev->dev,
108 "Memory resource not available\n");
109 err = -ENODEV;
110 goto out_error;
111 }
112
113 mem_size = resource_size(mem_res);
114 if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
115 dev_err(&pdev->dev,
116 "Cannot reserve the memory resource\n");
117 err = -EBUSY;
118 goto out_error;
119 }
120
121 mmio_base = ioremap_nocache(mem_res->start, mem_size);
122 if (!mmio_base) {
123 dev_err(&pdev->dev, "memory map failed\n");
124 err = -ENOMEM;
125 goto out_release_regions;
126 }
127
128 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
129 if (!irq_res) {
130 dev_err(&pdev->dev, "IRQ resource not available\n");
131 err = -ENODEV;
132 goto out_iounmap;
133 }
134
135 err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
136 if (err) {
137 dev_err(&pdev->dev, "set dma mask failed\n");
138 goto out_iounmap;
139 }
140
141 err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
142 if (err) {
143 dev_err(&pdev->dev, "Intialization failed\n");
144 goto out_iounmap;
145 }
146
147 platform_set_drvdata(pdev, hba);
148
149 return 0;
150
151out_iounmap:
152 iounmap(mmio_base);
153out_release_regions:
154 release_mem_region(mem_res->start, mem_size);
155out_error:
156 return err;
157}
158
159/**
160 * ufshcd_pltfrm_remove - remove platform driver routine
161 * @pdev: pointer to platform device handle
162 *
163 * Returns 0 on success, non-zero value on failure
164 */
165static int ufshcd_pltfrm_remove(struct platform_device *pdev)
166{
167 struct resource *mem_res;
168 resource_size_t mem_size;
169 struct ufs_hba *hba = platform_get_drvdata(pdev);
170
171 disable_irq(hba->irq);
172
173 /* Some buggy controllers raise interrupt after
174 * the resources are removed. So first we unregister the
175 * irq handler and then the resources used by driver
176 */
177
178 free_irq(hba->irq, hba);
179 ufshcd_remove(hba);
180 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
181 if (!mem_res)
182 dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
183 else {
184 mem_size = resource_size(mem_res);
185 release_mem_region(mem_res->start, mem_size);
186 }
187 platform_set_drvdata(pdev, NULL);
188 return 0;
189}
190
191static const struct of_device_id ufs_of_match[] = {
192 { .compatible = "jedec,ufs-1.1"},
193};
194
195static const struct dev_pm_ops ufshcd_dev_pm_ops = {
196 .suspend = ufshcd_pltfrm_suspend,
197 .resume = ufshcd_pltfrm_resume,
198};
199
200static struct platform_driver ufshcd_pltfrm_driver = {
201 .probe = ufshcd_pltfrm_probe,
202 .remove = ufshcd_pltfrm_remove,
203 .driver = {
204 .name = "ufshcd",
205 .owner = THIS_MODULE,
206 .pm = &ufshcd_dev_pm_ops,
207 .of_match_table = ufs_of_match,
208 },
209};
210
211module_platform_driver(ufshcd_pltfrm_driver);
212
213MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
214MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
215MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
216MODULE_LICENSE("GPL");
217MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 60fd40c4e4c2..c32a478df81b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
478 ucd_cmd_ptr->header.dword_2 = 0; 478 ucd_cmd_ptr->header.dword_2 = 0;
479 479
480 ucd_cmd_ptr->exp_data_transfer_len = 480 ucd_cmd_ptr->exp_data_transfer_len =
481 cpu_to_be32(lrbp->cmd->transfersize); 481 cpu_to_be32(lrbp->cmd->sdb.length);
482 482
483 memcpy(ucd_cmd_ptr->cdb, 483 memcpy(ucd_cmd_ptr->cdb,
484 lrbp->cmd->cmnd, 484 lrbp->cmd->cmnd,
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 787bd2c22bca..380387a47b1d 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
526 } 526 }
527 527
528 if (xfer->tx_buf) 528 if (xfer->tx_buf)
529 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); 529 if (xfer->bits_per_word > 8)
530 spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
531 else
532 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
530 else 533 else
531 spi_writel(as, TDR, 0); 534 spi_writel(as, TDR, 0);
532 535
533 dev_dbg(master->dev.parent, 536 dev_dbg(master->dev.parent,
534 " start pio xfer %p: len %u tx %p rx %p\n", 537 " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
535 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); 538 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
539 xfer->bits_per_word);
536 540
537 /* Enable relevant interrupts */ 541 /* Enable relevant interrupts */
538 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); 542 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
@@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
950{ 954{
951 u8 *txp; 955 u8 *txp;
952 u8 *rxp; 956 u8 *rxp;
957 u16 *txp16;
958 u16 *rxp16;
953 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; 959 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
954 960
955 if (xfer->rx_buf) { 961 if (xfer->rx_buf) {
956 rxp = ((u8 *)xfer->rx_buf) + xfer_pos; 962 if (xfer->bits_per_word > 8) {
957 *rxp = spi_readl(as, RDR); 963 rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
964 *rxp16 = spi_readl(as, RDR);
965 } else {
966 rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
967 *rxp = spi_readl(as, RDR);
968 }
958 } else { 969 } else {
959 spi_readl(as, RDR); 970 spi_readl(as, RDR);
960 } 971 }
961 972 if (xfer->bits_per_word > 8) {
962 as->current_remaining_bytes--; 973 as->current_remaining_bytes -= 2;
974 if (as->current_remaining_bytes < 0)
975 as->current_remaining_bytes = 0;
976 } else {
977 as->current_remaining_bytes--;
978 }
963 979
964 if (as->current_remaining_bytes) { 980 if (as->current_remaining_bytes) {
965 if (xfer->tx_buf) { 981 if (xfer->tx_buf) {
966 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; 982 if (xfer->bits_per_word > 8) {
967 spi_writel(as, TDR, *txp); 983 txp16 = (u16 *)(((u8 *)xfer->tx_buf)
984 + xfer_pos + 2);
985 spi_writel(as, TDR, *txp16);
986 } else {
987 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
988 spi_writel(as, TDR, *txp);
989 }
968 } else { 990 } else {
969 spi_writel(as, TDR, 0); 991 spi_writel(as, TDR, 0);
970 } 992 }
@@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1378 } 1400 }
1379 } 1401 }
1380 1402
1403 if (xfer->bits_per_word > 8) {
1404 if (xfer->len % 2) {
1405 dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
1406 return -EINVAL;
1407 }
1408 }
1409
1381 /* FIXME implement these protocol options!! */ 1410 /* FIXME implement these protocol options!! */
1382 if (xfer->speed_hz) { 1411 if (xfer->speed_hz < spi->max_speed_hz) {
1383 dev_dbg(&spi->dev, "no protocol options yet\n"); 1412 dev_dbg(&spi->dev, "can't change speed in transfer\n");
1384 return -ENOPROTOOPT; 1413 return -ENOPROTOOPT;
1385 } 1414 }
1386 1415
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 2e8f24a1fb95..50b13c9b1ab6 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = {
784 }, 784 },
785 { }, 785 { },
786}; 786};
787MODULE_DEVICE_TABLE(of, davini_spi_of_match); 787MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
788 788
789/** 789/**
790 * spi_davinci_get_pdata - Get platform data from DTS binding 790 * spi_davinci_get_pdata - Get platform data from DTS binding
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 163fd802b7ac..32b7bb111eb6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
334 spi->dev.parent = &master->dev; 334 spi->dev.parent = &master->dev;
335 spi->dev.bus = &spi_bus_type; 335 spi->dev.bus = &spi_bus_type;
336 spi->dev.release = spidev_release; 336 spi->dev.release = spidev_release;
337 spi->cs_gpio = -EINVAL; 337 spi->cs_gpio = -ENOENT;
338 device_initialize(&spi->dev); 338 device_initialize(&spi->dev);
339 return spi; 339 return spi;
340} 340}
@@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master)
1067 nb = of_gpio_named_count(np, "cs-gpios"); 1067 nb = of_gpio_named_count(np, "cs-gpios");
1068 master->num_chipselect = max(nb, (int)master->num_chipselect); 1068 master->num_chipselect = max(nb, (int)master->num_chipselect);
1069 1069
1070 if (nb < 1) 1070 /* Return error only for an incorrectly formed cs-gpios property */
1071 if (nb == 0 || nb == -ENOENT)
1071 return 0; 1072 return 0;
1073 else if (nb < 0)
1074 return nb;
1072 1075
1073 cs = devm_kzalloc(&master->dev, 1076 cs = devm_kzalloc(&master->dev,
1074 sizeof(int) * master->num_chipselect, 1077 sizeof(int) * master->num_chipselect,
@@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master)
1079 return -ENOMEM; 1082 return -ENOMEM;
1080 1083
1081 for (i = 0; i < master->num_chipselect; i++) 1084 for (i = 0; i < master->num_chipselect; i++)
1082 cs[i] = -EINVAL; 1085 cs[i] = -ENOENT;
1083 1086
1084 for (i = 0; i < nb; i++) 1087 for (i = 0; i < nb; i++)
1085 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1088 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index ef2e08e9b590..5dc9c4bfa66e 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -14,7 +14,6 @@
14 * 2.4/2.5 port David McCullough 14 * 2.4/2.5 port David McCullough
15 */ 15 */
16 16
17#include <asm/dbg.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/errno.h> 18#include <linux/errno.h>
20#include <linux/serial.h> 19#include <linux/serial.h>
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 52a3ecd40421..6fa2ae77fffd 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -30,7 +30,6 @@
30#include <linux/serial.h> 30#include <linux/serial.h>
31#include <linux/serial_core.h> 31#include <linux/serial_core.h>
32 32
33#include <bcm63xx_clk.h>
34#include <bcm63xx_irq.h> 33#include <bcm63xx_irq.h>
35#include <bcm63xx_regs.h> 34#include <bcm63xx_regs.h>
36#include <bcm63xx_io.h> 35#include <bcm63xx_io.h>
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 6953dc82850c..a4fdce74f883 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf)
60 tty_audit_buf_free(buf); 60 tty_audit_buf_free(buf);
61} 61}
62 62
63static void tty_audit_log(const char *description, struct task_struct *tsk, 63static void tty_audit_log(const char *description, int major, int minor,
64 kuid_t loginuid, unsigned sessionid, int major, 64 unsigned char *data, size_t size)
65 int minor, unsigned char *data, size_t size)
66{ 65{
67 struct audit_buffer *ab; 66 struct audit_buffer *ab;
67 struct task_struct *tsk = current;
68 uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
69 uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
70 u32 sessionid = audit_get_sessionid(tsk);
68 71
69 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); 72 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
70 if (ab) { 73 if (ab) {
71 char name[sizeof(tsk->comm)]; 74 char name[sizeof(tsk->comm)];
72 kuid_t uid = task_uid(tsk); 75
73 76 audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
74 audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " 77 " minor=%d comm=", description, tsk->pid, uid,
75 "major=%d minor=%d comm=", description, 78 loginuid, sessionid, major, minor);
76 tsk->pid,
77 from_kuid(&init_user_ns, uid),
78 from_kuid(&init_user_ns, loginuid),
79 sessionid,
80 major, minor);
81 get_task_comm(name, tsk); 79 get_task_comm(name, tsk);
82 audit_log_untrustedstring(ab, name); 80 audit_log_untrustedstring(ab, name);
83 audit_log_format(ab, " data="); 81 audit_log_format(ab, " data=");
@@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk,
90 * tty_audit_buf_push - Push buffered data out 88 * tty_audit_buf_push - Push buffered data out
91 * 89 *
92 * Generate an audit message from the contents of @buf, which is owned by 90 * Generate an audit message from the contents of @buf, which is owned by
93 * @tsk with @loginuid. @buf->mutex must be locked. 91 * the current task. @buf->mutex must be locked.
94 */ 92 */
95static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, 93static void tty_audit_buf_push(struct tty_audit_buf *buf)
96 unsigned int sessionid,
97 struct tty_audit_buf *buf)
98{ 94{
99 if (buf->valid == 0) 95 if (buf->valid == 0)
100 return; 96 return;
@@ -102,25 +98,11 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
102 buf->valid = 0; 98 buf->valid = 0;
103 return; 99 return;
104 } 100 }
105 tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, 101 tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid);
106 buf->data, buf->valid);
107 buf->valid = 0; 102 buf->valid = 0;
108} 103}
109 104
110/** 105/**
111 * tty_audit_buf_push_current - Push buffered data out
112 *
113 * Generate an audit message from the contents of @buf, which is owned by
114 * the current task. @buf->mutex must be locked.
115 */
116static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
117{
118 kuid_t auid = audit_get_loginuid(current);
119 unsigned int sessionid = audit_get_sessionid(current);
120 tty_audit_buf_push(current, auid, sessionid, buf);
121}
122
123/**
124 * tty_audit_exit - Handle a task exit 106 * tty_audit_exit - Handle a task exit
125 * 107 *
126 * Make sure all buffered data is written out and deallocate the buffer. 108 * Make sure all buffered data is written out and deallocate the buffer.
@@ -130,15 +112,13 @@ void tty_audit_exit(void)
130{ 112{
131 struct tty_audit_buf *buf; 113 struct tty_audit_buf *buf;
132 114
133 spin_lock_irq(&current->sighand->siglock);
134 buf = current->signal->tty_audit_buf; 115 buf = current->signal->tty_audit_buf;
135 current->signal->tty_audit_buf = NULL; 116 current->signal->tty_audit_buf = NULL;
136 spin_unlock_irq(&current->sighand->siglock);
137 if (!buf) 117 if (!buf)
138 return; 118 return;
139 119
140 mutex_lock(&buf->mutex); 120 mutex_lock(&buf->mutex);
141 tty_audit_buf_push_current(buf); 121 tty_audit_buf_push(buf);
142 mutex_unlock(&buf->mutex); 122 mutex_unlock(&buf->mutex);
143 123
144 tty_audit_buf_put(buf); 124 tty_audit_buf_put(buf);
@@ -151,9 +131,8 @@ void tty_audit_exit(void)
151 */ 131 */
152void tty_audit_fork(struct signal_struct *sig) 132void tty_audit_fork(struct signal_struct *sig)
153{ 133{
154 spin_lock_irq(&current->sighand->siglock);
155 sig->audit_tty = current->signal->audit_tty; 134 sig->audit_tty = current->signal->audit_tty;
156 spin_unlock_irq(&current->sighand->siglock); 135 sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd;
157} 136}
158 137
159/** 138/**
@@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
163{ 142{
164 struct tty_audit_buf *buf; 143 struct tty_audit_buf *buf;
165 int major, minor, should_audit; 144 int major, minor, should_audit;
145 unsigned long flags;
166 146
167 spin_lock_irq(&current->sighand->siglock); 147 spin_lock_irqsave(&current->sighand->siglock, flags);
168 should_audit = current->signal->audit_tty; 148 should_audit = current->signal->audit_tty;
169 buf = current->signal->tty_audit_buf; 149 buf = current->signal->tty_audit_buf;
170 if (buf) 150 if (buf)
171 atomic_inc(&buf->count); 151 atomic_inc(&buf->count);
172 spin_unlock_irq(&current->sighand->siglock); 152 spin_unlock_irqrestore(&current->sighand->siglock, flags);
173 153
174 major = tty->driver->major; 154 major = tty->driver->major;
175 minor = tty->driver->minor_start + tty->index; 155 minor = tty->driver->minor_start + tty->index;
176 if (buf) { 156 if (buf) {
177 mutex_lock(&buf->mutex); 157 mutex_lock(&buf->mutex);
178 if (buf->major == major && buf->minor == minor) 158 if (buf->major == major && buf->minor == minor)
179 tty_audit_buf_push_current(buf); 159 tty_audit_buf_push(buf);
180 mutex_unlock(&buf->mutex); 160 mutex_unlock(&buf->mutex);
181 tty_audit_buf_put(buf); 161 tty_audit_buf_put(buf);
182 } 162 }
@@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
187 167
188 auid = audit_get_loginuid(current); 168 auid = audit_get_loginuid(current);
189 sessionid = audit_get_sessionid(current); 169 sessionid = audit_get_sessionid(current);
190 tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major, 170 tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1);
191 minor, &ch, 1);
192 } 171 }
193} 172}
194 173
195/** 174/**
196 * tty_audit_push_task - Flush task's pending audit data 175 * tty_audit_push_current - Flush current's pending audit data
197 * @tsk: task pointer
198 * @loginuid: sender login uid
199 * @sessionid: sender session id
200 * 176 *
201 * Called with a ref on @tsk held. Try to lock sighand and get a 177 * Try to lock sighand and get a reference to the tty audit buffer if available.
202 * reference to the tty audit buffer if available.
203 * Flush the buffer or return an appropriate error code. 178 * Flush the buffer or return an appropriate error code.
204 */ 179 */
205int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) 180int tty_audit_push_current(void)
206{ 181{
207 struct tty_audit_buf *buf = ERR_PTR(-EPERM); 182 struct tty_audit_buf *buf = ERR_PTR(-EPERM);
183 struct task_struct *tsk = current;
208 unsigned long flags; 184 unsigned long flags;
209 185
210 if (!lock_task_sighand(tsk, &flags)) 186 if (!lock_task_sighand(tsk, &flags))
@@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
225 return PTR_ERR(buf); 201 return PTR_ERR(buf);
226 202
227 mutex_lock(&buf->mutex); 203 mutex_lock(&buf->mutex);
228 tty_audit_buf_push(tsk, loginuid, sessionid, buf); 204 tty_audit_buf_push(buf);
229 mutex_unlock(&buf->mutex); 205 mutex_unlock(&buf->mutex);
230 206
231 tty_audit_buf_put(buf); 207 tty_audit_buf_put(buf);
@@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
243 unsigned icanon) 219 unsigned icanon)
244{ 220{
245 struct tty_audit_buf *buf, *buf2; 221 struct tty_audit_buf *buf, *buf2;
222 unsigned long flags;
246 223
247 buf = NULL; 224 buf = NULL;
248 buf2 = NULL; 225 buf2 = NULL;
249 spin_lock_irq(&current->sighand->siglock); 226 spin_lock_irqsave(&current->sighand->siglock, flags);
250 if (likely(!current->signal->audit_tty)) 227 if (likely(!current->signal->audit_tty))
251 goto out; 228 goto out;
252 buf = current->signal->tty_audit_buf; 229 buf = current->signal->tty_audit_buf;
@@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
254 atomic_inc(&buf->count); 231 atomic_inc(&buf->count);
255 goto out; 232 goto out;
256 } 233 }
257 spin_unlock_irq(&current->sighand->siglock); 234 spin_unlock_irqrestore(&current->sighand->siglock, flags);
258 235
259 buf2 = tty_audit_buf_alloc(tty->driver->major, 236 buf2 = tty_audit_buf_alloc(tty->driver->major,
260 tty->driver->minor_start + tty->index, 237 tty->driver->minor_start + tty->index,
@@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
264 return NULL; 241 return NULL;
265 } 242 }
266 243
267 spin_lock_irq(&current->sighand->siglock); 244 spin_lock_irqsave(&current->sighand->siglock, flags);
268 if (!current->signal->audit_tty) 245 if (!current->signal->audit_tty)
269 goto out; 246 goto out;
270 buf = current->signal->tty_audit_buf; 247 buf = current->signal->tty_audit_buf;
@@ -276,7 +253,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
276 atomic_inc(&buf->count); 253 atomic_inc(&buf->count);
277 /* Fall through */ 254 /* Fall through */
278 out: 255 out:
279 spin_unlock_irq(&current->sighand->siglock); 256 spin_unlock_irqrestore(&current->sighand->siglock, flags);
280 if (buf2) 257 if (buf2)
281 tty_audit_buf_free(buf2); 258 tty_audit_buf_free(buf2);
282 return buf; 259 return buf;
@@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
292{ 269{
293 struct tty_audit_buf *buf; 270 struct tty_audit_buf *buf;
294 int major, minor; 271 int major, minor;
272 int audit_log_tty_passwd;
273 unsigned long flags;
295 274
296 if (unlikely(size == 0)) 275 if (unlikely(size == 0))
297 return; 276 return;
298 277
278 spin_lock_irqsave(&current->sighand->siglock, flags);
279 audit_log_tty_passwd = current->signal->audit_tty_log_passwd;
280 spin_unlock_irqrestore(&current->sighand->siglock, flags);
281 if (!audit_log_tty_passwd && icanon && !L_ECHO(tty))
282 return;
283
299 if (tty->driver->type == TTY_DRIVER_TYPE_PTY 284 if (tty->driver->type == TTY_DRIVER_TYPE_PTY
300 && tty->driver->subtype == PTY_TYPE_MASTER) 285 && tty->driver->subtype == PTY_TYPE_MASTER)
301 return; 286 return;
@@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
309 minor = tty->driver->minor_start + tty->index; 294 minor = tty->driver->minor_start + tty->index;
310 if (buf->major != major || buf->minor != minor 295 if (buf->major != major || buf->minor != minor
311 || buf->icanon != icanon) { 296 || buf->icanon != icanon) {
312 tty_audit_buf_push_current(buf); 297 tty_audit_buf_push(buf);
313 buf->major = major; 298 buf->major = major;
314 buf->minor = minor; 299 buf->minor = minor;
315 buf->icanon = icanon; 300 buf->icanon = icanon;
@@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
325 data += run; 310 data += run;
326 size -= run; 311 size -= run;
327 if (buf->valid == N_TTY_BUF_SIZE) 312 if (buf->valid == N_TTY_BUF_SIZE)
328 tty_audit_buf_push_current(buf); 313 tty_audit_buf_push(buf);
329 } while (size != 0); 314 } while (size != 0);
330 mutex_unlock(&buf->mutex); 315 mutex_unlock(&buf->mutex);
331 tty_audit_buf_put(buf); 316 tty_audit_buf_put(buf);
@@ -339,16 +324,17 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
339void tty_audit_push(struct tty_struct *tty) 324void tty_audit_push(struct tty_struct *tty)
340{ 325{
341 struct tty_audit_buf *buf; 326 struct tty_audit_buf *buf;
327 unsigned long flags;
342 328
343 spin_lock_irq(&current->sighand->siglock); 329 spin_lock_irqsave(&current->sighand->siglock, flags);
344 if (likely(!current->signal->audit_tty)) { 330 if (likely(!current->signal->audit_tty)) {
345 spin_unlock_irq(&current->sighand->siglock); 331 spin_unlock_irqrestore(&current->sighand->siglock, flags);
346 return; 332 return;
347 } 333 }
348 buf = current->signal->tty_audit_buf; 334 buf = current->signal->tty_audit_buf;
349 if (buf) 335 if (buf)
350 atomic_inc(&buf->count); 336 atomic_inc(&buf->count);
351 spin_unlock_irq(&current->sighand->siglock); 337 spin_unlock_irqrestore(&current->sighand->siglock, flags);
352 338
353 if (buf) { 339 if (buf) {
354 int major, minor; 340 int major, minor;
@@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty)
357 minor = tty->driver->minor_start + tty->index; 343 minor = tty->driver->minor_start + tty->index;
358 mutex_lock(&buf->mutex); 344 mutex_lock(&buf->mutex);
359 if (buf->major == major && buf->minor == minor) 345 if (buf->major == major && buf->minor == minor)
360 tty_audit_buf_push_current(buf); 346 tty_audit_buf_push(buf);
361 mutex_unlock(&buf->mutex); 347 mutex_unlock(&buf->mutex);
362 tty_audit_buf_put(buf); 348 tty_audit_buf_put(buf);
363 } 349 }
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index bff0775e258c..5174ebac288d 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Since these may be in userspace, we use (inline) accessors. 4 * Since these may be in userspace, we use (inline) accessors.
5 */ 5 */
6#include <linux/module.h>
6#include <linux/vringh.h> 7#include <linux/vringh.h>
7#include <linux/virtio_ring.h> 8#include <linux/virtio_ring.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
@@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh)
1005 return __vringh_need_notify(vrh, getu16_kern); 1006 return __vringh_need_notify(vrh, getu16_kern);
1006} 1007}
1007EXPORT_SYMBOL(vringh_need_notify_kern); 1008EXPORT_SYMBOL(vringh_need_notify_kern);
1009
1010MODULE_LICENSE("GPL");
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index ddabaa867b0d..700cac067b46 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
111 switch (blank_mode) { 111 switch (blank_mode) {
112 112
113 case VESA_NO_BLANKING: 113 case VESA_NO_BLANKING:
114 /* Turn on panel */ 114 /* Turn on panel */
115 fbdev->regs->lcd_control |= LCD_CONTROL_GO; 115 fbdev->regs->lcd_control |= LCD_CONTROL_GO;
116#ifdef CONFIG_MIPS_PB1100
117 if (fbdev->panel_idx == 1) {
118 au_writew(au_readw(PB1100_G_CONTROL)
119 | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
120 PB1100_G_CONTROL);
121 }
122#endif
123 au_sync(); 116 au_sync();
124 break; 117 break;
125 118
126 case VESA_VSYNC_SUSPEND: 119 case VESA_VSYNC_SUSPEND:
127 case VESA_HSYNC_SUSPEND: 120 case VESA_HSYNC_SUSPEND:
128 case VESA_POWERDOWN: 121 case VESA_POWERDOWN:
129 /* Turn off panel */ 122 /* Turn off panel */
130 fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; 123 fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
131#ifdef CONFIG_MIPS_PB1100
132 if (fbdev->panel_idx == 1) {
133 au_writew(au_readw(PB1100_G_CONTROL)
134 & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
135 PB1100_G_CONTROL);
136 }
137#endif
138 au_sync(); 124 au_sync();
139 break; 125 break;
140 default: 126 default:
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index dd4d9cb86243..f03bf501527f 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -141,7 +141,7 @@ config XEN_GRANT_DEV_ALLOC
141 141
142config SWIOTLB_XEN 142config SWIOTLB_XEN
143 def_bool y 143 def_bool y
144 depends on PCI 144 depends on PCI && X86
145 select SWIOTLB 145 select SWIOTLB
146 146
147config XEN_TMEM 147config XEN_TMEM
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index d8cc8127f19c..6a6bbe4ede92 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -167,6 +167,8 @@ static void xen_irq_info_common_init(struct irq_info *info,
167 info->cpu = cpu; 167 info->cpu = cpu;
168 168
169 evtchn_to_irq[evtchn] = irq; 169 evtchn_to_irq[evtchn] = irq;
170
171 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
170} 172}
171 173
172static void xen_irq_info_evtchn_init(unsigned irq, 174static void xen_irq_info_evtchn_init(unsigned irq,
@@ -874,7 +876,6 @@ int bind_evtchn_to_irq(unsigned int evtchn)
874 struct irq_info *info = info_for_irq(irq); 876 struct irq_info *info = info_for_irq(irq);
875 WARN_ON(info == NULL || info->type != IRQT_EVTCHN); 877 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
876 } 878 }
877 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
878 879
879out: 880out:
880 mutex_unlock(&irq_mapping_update_lock); 881 mutex_unlock(&irq_mapping_update_lock);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index d5c25db4398f..f71ec125290d 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -243,7 +243,7 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
243 struct ecryptfs_key_sig *key_sig, *key_sig_tmp; 243 struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
244 244
245 if (crypt_stat->tfm) 245 if (crypt_stat->tfm)
246 crypto_free_blkcipher(crypt_stat->tfm); 246 crypto_free_ablkcipher(crypt_stat->tfm);
247 if (crypt_stat->hash_tfm) 247 if (crypt_stat->hash_tfm)
248 crypto_free_hash(crypt_stat->hash_tfm); 248 crypto_free_hash(crypt_stat->hash_tfm);
249 list_for_each_entry_safe(key_sig, key_sig_tmp, 249 list_for_each_entry_safe(key_sig, key_sig_tmp,
@@ -319,6 +319,22 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
319 return i; 319 return i;
320} 320}
321 321
322struct extent_crypt_result {
323 struct completion completion;
324 int rc;
325};
326
327static void extent_crypt_complete(struct crypto_async_request *req, int rc)
328{
329 struct extent_crypt_result *ecr = req->data;
330
331 if (rc == -EINPROGRESS)
332 return;
333
334 ecr->rc = rc;
335 complete(&ecr->completion);
336}
337
322/** 338/**
323 * encrypt_scatterlist 339 * encrypt_scatterlist
324 * @crypt_stat: Pointer to the crypt_stat struct to initialize. 340 * @crypt_stat: Pointer to the crypt_stat struct to initialize.
@@ -334,11 +350,8 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
334 struct scatterlist *src_sg, int size, 350 struct scatterlist *src_sg, int size,
335 unsigned char *iv) 351 unsigned char *iv)
336{ 352{
337 struct blkcipher_desc desc = { 353 struct ablkcipher_request *req = NULL;
338 .tfm = crypt_stat->tfm, 354 struct extent_crypt_result ecr;
339 .info = iv,
340 .flags = CRYPTO_TFM_REQ_MAY_SLEEP
341 };
342 int rc = 0; 355 int rc = 0;
343 356
344 BUG_ON(!crypt_stat || !crypt_stat->tfm 357 BUG_ON(!crypt_stat || !crypt_stat->tfm
@@ -349,24 +362,47 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
349 ecryptfs_dump_hex(crypt_stat->key, 362 ecryptfs_dump_hex(crypt_stat->key,
350 crypt_stat->key_size); 363 crypt_stat->key_size);
351 } 364 }
352 /* Consider doing this once, when the file is opened */ 365
366 init_completion(&ecr.completion);
367
353 mutex_lock(&crypt_stat->cs_tfm_mutex); 368 mutex_lock(&crypt_stat->cs_tfm_mutex);
354 if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) { 369 req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
355 rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key, 370 if (!req) {
356 crypt_stat->key_size);
357 crypt_stat->flags |= ECRYPTFS_KEY_SET;
358 }
359 if (rc) {
360 ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
361 rc);
362 mutex_unlock(&crypt_stat->cs_tfm_mutex); 371 mutex_unlock(&crypt_stat->cs_tfm_mutex);
363 rc = -EINVAL; 372 rc = -ENOMEM;
364 goto out; 373 goto out;
365 } 374 }
366 ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size); 375
367 crypto_blkcipher_encrypt_iv(&desc, dest_sg, src_sg, size); 376 ablkcipher_request_set_callback(req,
377 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
378 extent_crypt_complete, &ecr);
379 /* Consider doing this once, when the file is opened */
380 if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
381 rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
382 crypt_stat->key_size);
383 if (rc) {
384 ecryptfs_printk(KERN_ERR,
385 "Error setting key; rc = [%d]\n",
386 rc);
387 mutex_unlock(&crypt_stat->cs_tfm_mutex);
388 rc = -EINVAL;
389 goto out;
390 }
391 crypt_stat->flags |= ECRYPTFS_KEY_SET;
392 }
368 mutex_unlock(&crypt_stat->cs_tfm_mutex); 393 mutex_unlock(&crypt_stat->cs_tfm_mutex);
394 ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
395 ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
396 rc = crypto_ablkcipher_encrypt(req);
397 if (rc == -EINPROGRESS || rc == -EBUSY) {
398 struct extent_crypt_result *ecr = req->base.data;
399
400 wait_for_completion(&ecr->completion);
401 rc = ecr->rc;
402 INIT_COMPLETION(ecr->completion);
403 }
369out: 404out:
405 ablkcipher_request_free(req);
370 return rc; 406 return rc;
371} 407}
372 408
@@ -624,35 +660,61 @@ static int decrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
624 struct scatterlist *src_sg, int size, 660 struct scatterlist *src_sg, int size,
625 unsigned char *iv) 661 unsigned char *iv)
626{ 662{
627 struct blkcipher_desc desc = { 663 struct ablkcipher_request *req = NULL;
628 .tfm = crypt_stat->tfm, 664 struct extent_crypt_result ecr;
629 .info = iv,
630 .flags = CRYPTO_TFM_REQ_MAY_SLEEP
631 };
632 int rc = 0; 665 int rc = 0;
633 666
634 /* Consider doing this once, when the file is opened */ 667 BUG_ON(!crypt_stat || !crypt_stat->tfm
668 || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
669 if (unlikely(ecryptfs_verbosity > 0)) {
670 ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
671 crypt_stat->key_size);
672 ecryptfs_dump_hex(crypt_stat->key,
673 crypt_stat->key_size);
674 }
675
676 init_completion(&ecr.completion);
677
635 mutex_lock(&crypt_stat->cs_tfm_mutex); 678 mutex_lock(&crypt_stat->cs_tfm_mutex);
636 rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key, 679 req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
637 crypt_stat->key_size); 680 if (!req) {
638 if (rc) {
639 ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
640 rc);
641 mutex_unlock(&crypt_stat->cs_tfm_mutex); 681 mutex_unlock(&crypt_stat->cs_tfm_mutex);
642 rc = -EINVAL; 682 rc = -ENOMEM;
643 goto out; 683 goto out;
644 } 684 }
645 ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size); 685
646 rc = crypto_blkcipher_decrypt_iv(&desc, dest_sg, src_sg, size); 686 ablkcipher_request_set_callback(req,
687 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
688 extent_crypt_complete, &ecr);
689 /* Consider doing this once, when the file is opened */
690 if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
691 rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
692 crypt_stat->key_size);
693 if (rc) {
694 ecryptfs_printk(KERN_ERR,
695 "Error setting key; rc = [%d]\n",
696 rc);
697 mutex_unlock(&crypt_stat->cs_tfm_mutex);
698 rc = -EINVAL;
699 goto out;
700 }
701 crypt_stat->flags |= ECRYPTFS_KEY_SET;
702 }
647 mutex_unlock(&crypt_stat->cs_tfm_mutex); 703 mutex_unlock(&crypt_stat->cs_tfm_mutex);
648 if (rc) { 704 ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
649 ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n", 705 ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
650 rc); 706 rc = crypto_ablkcipher_decrypt(req);
651 goto out; 707 if (rc == -EINPROGRESS || rc == -EBUSY) {
708 struct extent_crypt_result *ecr = req->base.data;
709
710 wait_for_completion(&ecr->completion);
711 rc = ecr->rc;
712 INIT_COMPLETION(ecr->completion);
652 } 713 }
653 rc = size;
654out: 714out:
715 ablkcipher_request_free(req);
655 return rc; 716 return rc;
717
656} 718}
657 719
658/** 720/**
@@ -746,8 +808,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
746 crypt_stat->cipher, "cbc"); 808 crypt_stat->cipher, "cbc");
747 if (rc) 809 if (rc)
748 goto out_unlock; 810 goto out_unlock;
749 crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0, 811 crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
750 CRYPTO_ALG_ASYNC);
751 kfree(full_alg_name); 812 kfree(full_alg_name);
752 if (IS_ERR(crypt_stat->tfm)) { 813 if (IS_ERR(crypt_stat->tfm)) {
753 rc = PTR_ERR(crypt_stat->tfm); 814 rc = PTR_ERR(crypt_stat->tfm);
@@ -757,7 +818,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
757 crypt_stat->cipher); 818 crypt_stat->cipher);
758 goto out_unlock; 819 goto out_unlock;
759 } 820 }
760 crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY); 821 crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
761 rc = 0; 822 rc = 0;
762out_unlock: 823out_unlock:
763 mutex_unlock(&crypt_stat->cs_tfm_mutex); 824 mutex_unlock(&crypt_stat->cs_tfm_mutex);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index dd299b389d4e..f622a733f7ad 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -38,6 +38,7 @@
38#include <linux/nsproxy.h> 38#include <linux/nsproxy.h>
39#include <linux/backing-dev.h> 39#include <linux/backing-dev.h>
40#include <linux/ecryptfs.h> 40#include <linux/ecryptfs.h>
41#include <linux/crypto.h>
41 42
42#define ECRYPTFS_DEFAULT_IV_BYTES 16 43#define ECRYPTFS_DEFAULT_IV_BYTES 16
43#define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096 44#define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
@@ -233,7 +234,7 @@ struct ecryptfs_crypt_stat {
233 size_t extent_shift; 234 size_t extent_shift;
234 unsigned int extent_mask; 235 unsigned int extent_mask;
235 struct ecryptfs_mount_crypt_stat *mount_crypt_stat; 236 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
236 struct crypto_blkcipher *tfm; 237 struct crypto_ablkcipher *tfm;
237 struct crypto_hash *hash_tfm; /* Crypto context for generating 238 struct crypto_hash *hash_tfm; /* Crypto context for generating
238 * the initialization vectors */ 239 * the initialization vectors */
239 unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; 240 unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
diff --git a/fs/namei.c b/fs/namei.c
index 57ae9c8c66bf..85e40d1c0a8f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2740,7 +2740,7 @@ static int do_last(struct nameidata *nd, struct path *path,
2740 if (error) 2740 if (error)
2741 return error; 2741 return error;
2742 2742
2743 audit_inode(name, dir, 0); 2743 audit_inode(name, dir, LOOKUP_PARENT);
2744 error = -EISDIR; 2744 error = -EISDIR;
2745 /* trailing slashes? */ 2745 /* trailing slashes? */
2746 if (nd->last.name[nd->last.len]) 2746 if (nd->last.name[nd->last.len])
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 8ae5abfe6ba2..27d74a294515 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -279,6 +279,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
279{ 279{
280 struct svc_fh *current_fh = &cstate->current_fh; 280 struct svc_fh *current_fh = &cstate->current_fh;
281 __be32 status; 281 __be32 status;
282 int accmode = 0;
282 283
283 /* We don't know the target directory, and therefore can not 284 /* We don't know the target directory, and therefore can not
284 * set the change info 285 * set the change info
@@ -290,9 +291,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
290 291
291 open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) && 292 open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
292 (open->op_iattr.ia_size == 0); 293 (open->op_iattr.ia_size == 0);
294 /*
295 * In the delegation case, the client is telling us about an
296 * open that it *already* performed locally, some time ago. We
297 * should let it succeed now if possible.
298 *
299 * In the case of a CLAIM_FH open, on the other hand, the client
300 * may be counting on us to enforce permissions (the Linux 4.1
301 * client uses this for normal opens, for example).
302 */
303 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
304 accmode = NFSD_MAY_OWNER_OVERRIDE;
293 305
294 status = do_open_permission(rqstp, current_fh, open, 306 status = do_open_permission(rqstp, current_fh, open, accmode);
295 NFSD_MAY_OWNER_OVERRIDE);
296 307
297 return status; 308 return status;
298} 309}
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 899ca26dd194..4e9a21db867a 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -146,7 +146,7 @@ out_no_tfm:
146 * then disable recovery tracking. 146 * then disable recovery tracking.
147 */ 147 */
148static void 148static void
149legacy_recdir_name_error(int error) 149legacy_recdir_name_error(struct nfs4_client *clp, int error)
150{ 150{
151 printk(KERN_ERR "NFSD: unable to generate recoverydir " 151 printk(KERN_ERR "NFSD: unable to generate recoverydir "
152 "name (%d).\n", error); 152 "name (%d).\n", error);
@@ -159,9 +159,7 @@ legacy_recdir_name_error(int error)
159 if (error == -ENOENT) { 159 if (error == -ENOENT) {
160 printk(KERN_ERR "NFSD: disabling legacy clientid tracking. " 160 printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
161 "Reboot recovery will not function correctly!\n"); 161 "Reboot recovery will not function correctly!\n");
162 162 nfsd4_client_tracking_exit(clp->net);
163 /* the argument is ignored by the legacy exit function */
164 nfsd4_client_tracking_exit(NULL);
165 } 163 }
166} 164}
167 165
@@ -184,7 +182,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
184 182
185 status = nfs4_make_rec_clidname(dname, &clp->cl_name); 183 status = nfs4_make_rec_clidname(dname, &clp->cl_name);
186 if (status) 184 if (status)
187 return legacy_recdir_name_error(status); 185 return legacy_recdir_name_error(clp, status);
188 186
189 status = nfs4_save_creds(&original_cred); 187 status = nfs4_save_creds(&original_cred);
190 if (status < 0) 188 if (status < 0)
@@ -341,7 +339,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
341 339
342 status = nfs4_make_rec_clidname(dname, &clp->cl_name); 340 status = nfs4_make_rec_clidname(dname, &clp->cl_name);
343 if (status) 341 if (status)
344 return legacy_recdir_name_error(status); 342 return legacy_recdir_name_error(clp, status);
345 343
346 status = mnt_want_write_file(nn->rec_file); 344 status = mnt_want_write_file(nn->rec_file);
347 if (status) 345 if (status)
@@ -601,7 +599,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
601 599
602 status = nfs4_make_rec_clidname(dname, &clp->cl_name); 600 status = nfs4_make_rec_clidname(dname, &clp->cl_name);
603 if (status) { 601 if (status) {
604 legacy_recdir_name_error(status); 602 legacy_recdir_name_error(clp, status);
605 return status; 603 return status;
606 } 604 }
607 605
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index d0be29fa94cf..6c80083a984f 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <linux/compat.h>
16 17
17#include <asm/ioctls.h> 18#include <asm/ioctls.h>
18 19
@@ -857,6 +858,22 @@ fput_and_out:
857 return ret; 858 return ret;
858} 859}
859 860
861#ifdef CONFIG_COMPAT
862COMPAT_SYSCALL_DEFINE6(fanotify_mark,
863 int, fanotify_fd, unsigned int, flags,
864 __u32, mask0, __u32, mask1, int, dfd,
865 const char __user *, pathname)
866{
867 return sys_fanotify_mark(fanotify_fd, flags,
868#ifdef __BIG_ENDIAN
869 ((__u64)mask1 << 32) | mask0,
870#else
871 ((__u64)mask0 << 32) | mask1,
872#endif
873 dfd, pathname);
874}
875#endif
876
860/* 877/*
861 * fanotify_user_setup - Our initialization function. Note that we cannot return 878 * fanotify_user_setup - Our initialization function. Note that we cannot return
862 * error because we have compiled-in VFS hooks. So an (unlikely) failure here 879 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index e1a7779dd3cb..f373bde8f545 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -49,8 +49,11 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
49 return (unsigned long) -EINVAL; 49 return (unsigned long) -EINVAL;
50 50
51 offset += ROMFS_I(inode)->i_dataoffset; 51 offset += ROMFS_I(inode)->i_dataoffset;
52 if (offset > mtd->size - len) 52 if (offset >= mtd->size)
53 return (unsigned long) -EINVAL; 53 return (unsigned long) -EINVAL;
54 /* the mapping mustn't extend beyond the EOF */
55 if ((offset + len) > mtd->size)
56 len = mtd->size - offset;
54 57
55 ret = mtd_get_unmapped_area(mtd, len, offset, flags); 58 ret = mtd_get_unmapped_area(mtd, len, offset, flags);
56 if (ret == -EOPNOTSUPP) 59 if (ret == -EOPNOTSUPP)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 61196592152e..63d17ee9eb48 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -316,6 +316,7 @@ struct drm_ioctl_desc {
316 int flags; 316 int flags;
317 drm_ioctl_t *func; 317 drm_ioctl_t *func;
318 unsigned int cmd_drv; 318 unsigned int cmd_drv;
319 const char *name;
319}; 320};
320 321
321/** 322/**
@@ -324,7 +325,7 @@ struct drm_ioctl_desc {
324 */ 325 */
325 326
326#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ 327#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
327 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} 328 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
328 329
329struct drm_magic_entry { 330struct drm_magic_entry {
330 struct list_head head; 331 struct list_head head;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 8230b46fdd73..471f276ce8f7 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -50,13 +50,14 @@ struct drm_fb_helper_surface_size {
50 50
51/** 51/**
52 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library 52 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
53 * @gamma_set: - Set the given gamma lut register on the given crtc. 53 * @gamma_set: Set the given gamma lut register on the given crtc.
54 * @gamma_get: - Read the given gamma lut register on the given crtc, used to 54 * @gamma_get: Read the given gamma lut register on the given crtc, used to
55 * save the current lut when force-restoring the fbdev for e.g. 55 * save the current lut when force-restoring the fbdev for e.g.
56 * kdbg. 56 * kdbg.
57 * @fb_probe: - Driver callback to allocate and initialize the fbdev info 57 * @fb_probe: Driver callback to allocate and initialize the fbdev info
58 * structure. Futhermore it also needs to allocate the drm 58 * structure. Futhermore it also needs to allocate the drm
59 * framebuffer used to back the fbdev. 59 * framebuffer used to back the fbdev.
60 * @initial_config: Setup an initial fbdev display configuration
60 * 61 *
61 * Driver callbacks used by the fbdev emulation helper library. 62 * Driver callbacks used by the fbdev emulation helper library.
62 */ 63 */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 5a6d718adf34..b20b03852f21 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -84,8 +84,13 @@ extern int audit_classify_arch(int arch);
84#define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */ 84#define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */
85#define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */ 85#define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */
86 86
87/* maximized args number that audit_socketcall can process */
88#define AUDITSC_ARGS 6
89
87struct filename; 90struct filename;
88 91
92extern void audit_log_session_info(struct audit_buffer *ab);
93
89#ifdef CONFIG_AUDITSYSCALL 94#ifdef CONFIG_AUDITSYSCALL
90/* These are defined in auditsc.c */ 95/* These are defined in auditsc.c */
91 /* Public API */ 96 /* Public API */
@@ -120,7 +125,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
120 unsigned long a1, unsigned long a2, 125 unsigned long a1, unsigned long a2,
121 unsigned long a3) 126 unsigned long a3)
122{ 127{
123 if (unlikely(!audit_dummy_context())) 128 if (unlikely(current->audit_context))
124 __audit_syscall_entry(arch, major, a0, a1, a2, a3); 129 __audit_syscall_entry(arch, major, a0, a1, a2, a3);
125} 130}
126static inline void audit_syscall_exit(void *pt_regs) 131static inline void audit_syscall_exit(void *pt_regs)
@@ -185,12 +190,10 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
185 return tsk->sessionid; 190 return tsk->sessionid;
186} 191}
187 192
188extern void audit_log_task_context(struct audit_buffer *ab);
189extern void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk);
190extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); 193extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
191extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); 194extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
192extern int __audit_bprm(struct linux_binprm *bprm); 195extern int __audit_bprm(struct linux_binprm *bprm);
193extern void __audit_socketcall(int nargs, unsigned long *args); 196extern int __audit_socketcall(int nargs, unsigned long *args);
194extern int __audit_sockaddr(int len, void *addr); 197extern int __audit_sockaddr(int len, void *addr);
195extern void __audit_fd_pair(int fd1, int fd2); 198extern void __audit_fd_pair(int fd1, int fd2);
196extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); 199extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
@@ -224,10 +227,11 @@ static inline int audit_bprm(struct linux_binprm *bprm)
224 return __audit_bprm(bprm); 227 return __audit_bprm(bprm);
225 return 0; 228 return 0;
226} 229}
227static inline void audit_socketcall(int nargs, unsigned long *args) 230static inline int audit_socketcall(int nargs, unsigned long *args)
228{ 231{
229 if (unlikely(!audit_dummy_context())) 232 if (unlikely(!audit_dummy_context()))
230 __audit_socketcall(nargs, args); 233 return __audit_socketcall(nargs, args);
234 return 0;
231} 235}
232static inline int audit_sockaddr(int len, void *addr) 236static inline int audit_sockaddr(int len, void *addr)
233{ 237{
@@ -340,11 +344,6 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
340{ 344{
341 return -1; 345 return -1;
342} 346}
343static inline void audit_log_task_context(struct audit_buffer *ab)
344{ }
345static inline void audit_log_task_info(struct audit_buffer *ab,
346 struct task_struct *tsk)
347{ }
348static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) 347static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
349{ } 348{ }
350static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, 349static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
@@ -354,8 +353,10 @@ static inline int audit_bprm(struct linux_binprm *bprm)
354{ 353{
355 return 0; 354 return 0;
356} 355}
357static inline void audit_socketcall(int nargs, unsigned long *args) 356static inline int audit_socketcall(int nargs, unsigned long *args)
358{ } 357{
358 return 0;
359}
359static inline void audit_fd_pair(int fd1, int fd2) 360static inline void audit_fd_pair(int fd1, int fd2)
360{ } 361{ }
361static inline int audit_sockaddr(int len, void *addr) 362static inline int audit_sockaddr(int len, void *addr)
@@ -390,6 +391,11 @@ static inline void audit_ptrace(struct task_struct *t)
390#define audit_signals 0 391#define audit_signals 0
391#endif /* CONFIG_AUDITSYSCALL */ 392#endif /* CONFIG_AUDITSYSCALL */
392 393
394static inline bool audit_loginuid_set(struct task_struct *tsk)
395{
396 return uid_valid(audit_get_loginuid(tsk));
397}
398
393#ifdef CONFIG_AUDIT 399#ifdef CONFIG_AUDIT
394/* These are defined in audit.c */ 400/* These are defined in audit.c */
395 /* Public API */ 401 /* Public API */
@@ -429,14 +435,17 @@ static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
429{ } 435{ }
430#endif 436#endif
431 437
438extern int audit_log_task_context(struct audit_buffer *ab);
439extern void audit_log_task_info(struct audit_buffer *ab,
440 struct task_struct *tsk);
441
432extern int audit_update_lsm_rules(void); 442extern int audit_update_lsm_rules(void);
433 443
434 /* Private API (for audit.c only) */ 444 /* Private API (for audit.c only) */
435extern int audit_filter_user(void); 445extern int audit_filter_user(int type);
436extern int audit_filter_type(int type); 446extern int audit_filter_type(int type);
437extern int audit_receive_filter(int type, int pid, int seq, 447extern int audit_receive_filter(int type, int pid, int seq,
438 void *data, size_t datasz, kuid_t loginuid, 448 void *data, size_t datasz);
439 u32 sessionid, u32 sid);
440extern int audit_enabled; 449extern int audit_enabled;
441#else /* CONFIG_AUDIT */ 450#else /* CONFIG_AUDIT */
442static inline __printf(4, 5) 451static inline __printf(4, 5)
@@ -476,6 +485,13 @@ static inline void audit_log_link_denied(const char *string,
476{ } 485{ }
477static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) 486static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
478{ } 487{ }
488static inline int audit_log_task_context(struct audit_buffer *ab)
489{
490 return 0;
491}
492static inline void audit_log_task_info(struct audit_buffer *ab,
493 struct task_struct *tsk)
494{ }
479#define audit_enabled 0 495#define audit_enabled 0
480#endif /* CONFIG_AUDIT */ 496#endif /* CONFIG_AUDIT */
481static inline void audit_log_string(struct audit_buffer *ab, const char *buf) 497static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
diff --git a/include/linux/compat.h b/include/linux/compat.h
index d53c35352ea9..7f0c1dd09079 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -673,6 +673,8 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
673asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, 673asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
674 struct compat_timespec __user *interval); 674 struct compat_timespec __user *interval);
675 675
676asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
677 int, const char __user *);
676#else 678#else
677 679
678#define is_compat_task() (0) 680#define is_compat_task() (0)
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 3c86faa59798..8f0406230a0a 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -17,7 +17,7 @@
17#include <linux/completion.h> 17#include <linux/completion.h>
18#include <linux/hrtimer.h> 18#include <linux/hrtimer.h>
19 19
20#define CPUIDLE_STATE_MAX 8 20#define CPUIDLE_STATE_MAX 10
21#define CPUIDLE_NAME_LEN 16 21#define CPUIDLE_NAME_LEN 16
22#define CPUIDLE_DESC_LEN 32 22#define CPUIDLE_DESC_LEN 32
23 23
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 1e483fa7afb4..3cd32478f2fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -79,11 +79,26 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
79typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, 79typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
80 struct bio_vec *biovec, int max_size); 80 struct bio_vec *biovec, int max_size);
81 81
82/*
83 * These iteration functions are typically used to check (and combine)
84 * properties of underlying devices.
85 * E.g. Does at least one underlying device support flush?
86 * Does any underlying device not support WRITE_SAME?
87 *
88 * The callout function is called once for each contiguous section of
89 * an underlying device. State can be maintained in *data.
90 * Return non-zero to stop iterating through any further devices.
91 */
82typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, 92typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
83 struct dm_dev *dev, 93 struct dm_dev *dev,
84 sector_t start, sector_t len, 94 sector_t start, sector_t len,
85 void *data); 95 void *data);
86 96
97/*
98 * This function must iterate through each section of device used by the
99 * target until it encounters a non-zero return code, which it then returns.
100 * Returns zero if no callout returned non-zero.
101 */
87typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, 102typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
88 iterate_devices_callout_fn fn, 103 iterate_devices_callout_fn fn,
89 void *data); 104 void *data);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f83e17a40e8b..99d0fbcbaf79 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
90 * not set this, then the ftrace infrastructure will add recursion 90 * not set this, then the ftrace infrastructure will add recursion
91 * protection for the caller. 91 * protection for the caller.
92 * STUB - The ftrace_ops is just a place holder. 92 * STUB - The ftrace_ops is just a place holder.
93 * INITIALIZED - The ftrace_ops has already been initialized (first use time
94 * register_ftrace_function() is called, it will initialized the ops)
93 */ 95 */
94enum { 96enum {
95 FTRACE_OPS_FL_ENABLED = 1 << 0, 97 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +102,7 @@ enum {
100 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, 102 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
101 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, 103 FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
102 FTRACE_OPS_FL_STUB = 1 << 7, 104 FTRACE_OPS_FL_STUB = 1 << 7,
105 FTRACE_OPS_FL_INITIALIZED = 1 << 8,
103}; 106};
104 107
105struct ftrace_ops { 108struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
110#ifdef CONFIG_DYNAMIC_FTRACE 113#ifdef CONFIG_DYNAMIC_FTRACE
111 struct ftrace_hash *notrace_hash; 114 struct ftrace_hash *notrace_hash;
112 struct ftrace_hash *filter_hash; 115 struct ftrace_hash *filter_hash;
116 struct mutex regex_lock;
113#endif 117#endif
114}; 118};
115 119
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 34e00fb49bec..4372658c73ae 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -293,6 +293,7 @@ struct ftrace_event_file {
293 * caching and such. Which is mostly OK ;-) 293 * caching and such. Which is mostly OK ;-)
294 */ 294 */
295 unsigned long flags; 295 unsigned long flags;
296 atomic_t sm_ref; /* soft-mode reference counter */
296}; 297};
297 298
298#define __TRACE_EVENT_FLAGS(name, value) \ 299#define __TRACE_EVENT_FLAGS(name, value) \
diff --git a/include/linux/hid.h b/include/linux/hid.h
index af1b86d46f6e..0c48991b0402 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -515,7 +515,7 @@ struct hid_device { /* device report descriptor */
515 struct dentry *debug_rdesc; 515 struct dentry *debug_rdesc;
516 struct dentry *debug_events; 516 struct dentry *debug_events;
517 struct list_head debug_list; 517 struct list_head debug_list;
518 struct mutex debug_list_lock; 518 spinlock_t debug_list_lock;
519 wait_queue_head_t debug_wait; 519 wait_queue_head_t debug_wait;
520}; 520};
521 521
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2b85c521f737..c12916248469 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2147,11 +2147,13 @@
2147#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e 2147#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
2148#define PCI_DEVICE_ID_NX2_57712 0x1662 2148#define PCI_DEVICE_ID_NX2_57712 0x1662
2149#define PCI_DEVICE_ID_NX2_57712E 0x1663 2149#define PCI_DEVICE_ID_NX2_57712E 0x1663
2150#define PCI_DEVICE_ID_NX2_57712_MF 0x1663
2150#define PCI_DEVICE_ID_TIGON3_5714 0x1668 2151#define PCI_DEVICE_ID_TIGON3_5714 0x1668
2151#define PCI_DEVICE_ID_TIGON3_5714S 0x1669 2152#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
2152#define PCI_DEVICE_ID_TIGON3_5780 0x166a 2153#define PCI_DEVICE_ID_TIGON3_5780 0x166a
2153#define PCI_DEVICE_ID_TIGON3_5780S 0x166b 2154#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
2154#define PCI_DEVICE_ID_TIGON3_5705F 0x166e 2155#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
2156#define PCI_DEVICE_ID_NX2_57712_VF 0x166f
2155#define PCI_DEVICE_ID_TIGON3_5754M 0x1672 2157#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
2156#define PCI_DEVICE_ID_TIGON3_5755M 0x1673 2158#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
2157#define PCI_DEVICE_ID_TIGON3_5756 0x1674 2159#define PCI_DEVICE_ID_TIGON3_5756 0x1674
@@ -2177,13 +2179,15 @@
2177#define PCI_DEVICE_ID_TIGON3_5787 0x169b 2179#define PCI_DEVICE_ID_TIGON3_5787 0x169b
2178#define PCI_DEVICE_ID_TIGON3_5788 0x169c 2180#define PCI_DEVICE_ID_TIGON3_5788 0x169c
2179#define PCI_DEVICE_ID_TIGON3_5789 0x169d 2181#define PCI_DEVICE_ID_TIGON3_5789 0x169d
2182#define PCI_DEVICE_ID_NX2_57840_4_10 0x16a1
2183#define PCI_DEVICE_ID_NX2_57840_2_20 0x16a2
2184#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
2180#define PCI_DEVICE_ID_NX2_57800_MF 0x16a5 2185#define PCI_DEVICE_ID_NX2_57800_MF 0x16a5
2181#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 2186#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6
2182#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 2187#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7
2183#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 2188#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
2184#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 2189#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
2185#define PCI_DEVICE_ID_NX2_5706S 0x16aa 2190#define PCI_DEVICE_ID_NX2_5706S 0x16aa
2186#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
2187#define PCI_DEVICE_ID_NX2_5708S 0x16ac 2191#define PCI_DEVICE_ID_NX2_5708S 0x16ac
2188#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad 2192#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
2189#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae 2193#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
diff --git a/include/linux/sched.h b/include/linux/sched.h
index caa8f4d0186b..178a8d909f14 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -593,6 +593,7 @@ struct signal_struct {
593#endif 593#endif
594#ifdef CONFIG_AUDIT 594#ifdef CONFIG_AUDIT
595 unsigned audit_tty; 595 unsigned audit_tty;
596 unsigned audit_tty_log_passwd;
596 struct tty_audit_buf *tty_audit_buf; 597 struct tty_audit_buf *tty_audit_buf;
597#endif 598#endif
598#ifdef CONFIG_CGROUPS 599#ifdef CONFIG_CGROUPS
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 733eb5ee31c5..6ff26c8db7b9 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type;
57 * @modalias: Name of the driver to use with this device, or an alias 57 * @modalias: Name of the driver to use with this device, or an alias
58 * for that name. This appears in the sysfs "modalias" attribute 58 * for that name. This appears in the sysfs "modalias" attribute
59 * for driver coldplugging, and in uevents used for hotplugging 59 * for driver coldplugging, and in uevents used for hotplugging
60 * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when 60 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
61 * when not using a GPIO line) 61 * when not using a GPIO line)
62 * 62 *
63 * A @spi_device is used to interchange data between an SPI slave 63 * A @spi_device is used to interchange data between an SPI slave
@@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
266 * queue so the subsystem notifies the driver that it may relax the 266 * queue so the subsystem notifies the driver that it may relax the
267 * hardware by issuing this call 267 * hardware by issuing this call
268 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 268 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
269 * number. Any individual value may be -EINVAL for CS lines that 269 * number. Any individual value may be -ENOENT for CS lines that
270 * are not GPIOs (driven by the SPI controller itself). 270 * are not GPIOs (driven by the SPI controller itself).
271 * 271 *
272 * Each SPI master controller can communicate with one or more @spi_device 272 * Each SPI master controller can communicate with one or more @spi_device
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7e92bd86a808..8780bd2a272a 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -575,8 +575,7 @@ extern void tty_audit_exit(void);
575extern void tty_audit_fork(struct signal_struct *sig); 575extern void tty_audit_fork(struct signal_struct *sig);
576extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); 576extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
577extern void tty_audit_push(struct tty_struct *tty); 577extern void tty_audit_push(struct tty_struct *tty);
578extern int tty_audit_push_task(struct task_struct *tsk, 578extern int tty_audit_push_current(void);
579 kuid_t loginuid, u32 sessionid);
580#else 579#else
581static inline void tty_audit_add_data(struct tty_struct *tty, 580static inline void tty_audit_add_data(struct tty_struct *tty,
582 unsigned char *data, size_t size, unsigned icanon) 581 unsigned char *data, size_t size, unsigned icanon)
@@ -594,8 +593,7 @@ static inline void tty_audit_fork(struct signal_struct *sig)
594static inline void tty_audit_push(struct tty_struct *tty) 593static inline void tty_audit_push(struct tty_struct *tty)
595{ 594{
596} 595}
597static inline int tty_audit_push_task(struct task_struct *tsk, 596static inline int tty_audit_push_current(void)
598 kuid_t loginuid, u32 sessionid)
599{ 597{
600 return 0; 598 return 0;
601} 599}
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ef937b56f9b5..e2c1e66d58ae 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -118,7 +118,7 @@ struct ex_phy {
118 118
119 enum ex_phy_state phy_state; 119 enum ex_phy_state phy_state;
120 120
121 enum sas_dev_type attached_dev_type; 121 enum sas_device_type attached_dev_type;
122 enum sas_linkrate linkrate; 122 enum sas_linkrate linkrate;
123 123
124 u8 attached_sata_host:1; 124 u8 attached_sata_host:1;
@@ -195,7 +195,7 @@ enum {
195 195
196struct domain_device { 196struct domain_device {
197 spinlock_t done_lock; 197 spinlock_t done_lock;
198 enum sas_dev_type dev_type; 198 enum sas_device_type dev_type;
199 199
200 enum sas_linkrate linkrate; 200 enum sas_linkrate linkrate;
201 enum sas_linkrate min_linkrate; 201 enum sas_linkrate min_linkrate;
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index a6026da25f3e..25ac6283b9c7 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -107,7 +107,7 @@ enum osd_attributes_mode {
107 * int exponent: 04; 107 * int exponent: 04;
108 * } 108 * }
109 */ 109 */
110typedef __be32 __bitwise osd_cdb_offset; 110typedef __be32 osd_cdb_offset;
111 111
112enum { 112enum {
113 OSD_OFFSET_UNUSED = 0xFFFFFFFF, 113 OSD_OFFSET_UNUSED = 0xFFFFFFFF,
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index be3eb0bf1ac0..0d2607d12387 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -90,16 +90,18 @@ enum sas_oob_mode {
90}; 90};
91 91
92/* See sas_discover.c if you plan on changing these */ 92/* See sas_discover.c if you plan on changing these */
93enum sas_dev_type { 93enum sas_device_type {
94 NO_DEVICE = 0, /* protocol */ 94 /* these are SAS protocol defined (attached device type field) */
95 SAS_END_DEV = 1, /* protocol */ 95 SAS_PHY_UNUSED = 0,
96 EDGE_DEV = 2, /* protocol */ 96 SAS_END_DEVICE = 1,
97 FANOUT_DEV = 3, /* protocol */ 97 SAS_EDGE_EXPANDER_DEVICE = 2,
98 SAS_HA = 4, 98 SAS_FANOUT_EXPANDER_DEVICE = 3,
99 SATA_DEV = 5, 99 /* these are internal to libsas */
100 SATA_PM = 7, 100 SAS_HA = 4,
101 SATA_PM_PORT= 8, 101 SAS_SATA_DEV = 5,
102 SATA_PENDING = 9, 102 SAS_SATA_PM = 7,
103 SAS_SATA_PM_PORT = 8,
104 SAS_SATA_PENDING = 9,
103}; 105};
104 106
105enum sas_protocol { 107enum sas_protocol {
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index ff71a5654684..00f41aeeecf5 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -32,8 +32,8 @@
32 32
33static inline int dev_is_sata(struct domain_device *dev) 33static inline int dev_is_sata(struct domain_device *dev)
34{ 34{
35 return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || 35 return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
36 dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING; 36 dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
37} 37}
38 38
39int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy); 39int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a7f9cba275e9..cc645876d147 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -394,10 +394,18 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
394 int data_direction, void *buffer, unsigned bufflen, 394 int data_direction, void *buffer, unsigned bufflen,
395 unsigned char *sense, int timeout, int retries, 395 unsigned char *sense, int timeout, int retries,
396 int flag, int *resid); 396 int flag, int *resid);
397extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, 397extern int scsi_execute_req_flags(struct scsi_device *sdev,
398 int data_direction, void *buffer, unsigned bufflen, 398 const unsigned char *cmd, int data_direction, void *buffer,
399 struct scsi_sense_hdr *, int timeout, int retries, 399 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
400 int *resid); 400 int retries, int *resid, int flags);
401static inline int scsi_execute_req(struct scsi_device *sdev,
402 const unsigned char *cmd, int data_direction, void *buffer,
403 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
404 int retries, int *resid)
405{
406 return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
407 bufflen, sshdr, timeout, retries, resid, 0);
408}
401extern void sdev_disable_disk_events(struct scsi_device *sdev); 409extern void sdev_disable_disk_events(struct scsi_device *sdev);
402extern void sdev_enable_disk_events(struct scsi_device *sdev); 410extern void sdev_enable_disk_events(struct scsi_device *sdev);
403 411
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4a58cca2ecc1..d0f1602985e7 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -471,14 +471,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
471extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost); 471extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
472extern int iscsi_flashnode_bus_match(struct device *dev, 472extern int iscsi_flashnode_bus_match(struct device *dev,
473 struct device_driver *drv); 473 struct device_driver *drv);
474extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
475
476extern struct device * 474extern struct device *
477iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, 475iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
478 int (*fn)(struct device *dev, void *data)); 476 int (*fn)(struct device *dev, void *data));
479
480extern struct device * 477extern struct device *
481iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, 478iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
482 void *data, 479
483 int (*fn)(struct device *dev, void *data));
484#endif 480#endif
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 9b8e08879cfc..0bd71e2702e3 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -10,13 +10,6 @@ struct scsi_transport_template;
10struct sas_rphy; 10struct sas_rphy;
11struct request; 11struct request;
12 12
13enum sas_device_type {
14 SAS_PHY_UNUSED = 0,
15 SAS_END_DEVICE = 1,
16 SAS_EDGE_EXPANDER_DEVICE = 2,
17 SAS_FANOUT_EXPANDER_DEVICE = 3,
18};
19
20static inline int sas_protocol_ata(enum sas_protocol proto) 13static inline int sas_protocol_ata(enum sas_protocol proto)
21{ 14{
22 return ((proto & SAS_PROTOCOL_SATA) || 15 return ((proto & SAS_PROTOCOL_SATA) ||
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index 28c65e1ada21..e11e179420a1 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -74,7 +74,11 @@
74#define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB) \ 74#define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB) \
75 unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) } 75 unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
76 76
77/* dB range container */ 77/* dB range container:
78 * Items in dB range container must be ordered by their values and by their
79 * dB values. This implies that larger values must correspond with larger
80 * dB values (which is also required for all other mixer controls).
81 */
78/* Each item is: <min> <max> <TLV> */ 82/* Each item is: <min> <max> <TLV> */
79#define TLV_DB_RANGE_ITEM(...) \ 83#define TLV_DB_RANGE_ITEM(...) \
80 TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__) 84 TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 9f096f1c0907..75cef3fd97ad 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -246,6 +246,7 @@
246#define AUDIT_OBJ_TYPE 21 246#define AUDIT_OBJ_TYPE 21
247#define AUDIT_OBJ_LEV_LOW 22 247#define AUDIT_OBJ_LEV_LOW 22
248#define AUDIT_OBJ_LEV_HIGH 23 248#define AUDIT_OBJ_LEV_HIGH 23
249#define AUDIT_LOGINUID_SET 24
249 250
250 /* These are ONLY useful when checking 251 /* These are ONLY useful when checking
251 * at syscall exit time (AUDIT_AT_EXIT). */ 252 * at syscall exit time (AUDIT_AT_EXIT). */
@@ -369,7 +370,8 @@ struct audit_status {
369}; 370};
370 371
371struct audit_tty_status { 372struct audit_tty_status {
372 __u32 enabled; /* 1 = enabled, 0 = disabled */ 373 __u32 enabled; /* 1 = enabled, 0 = disabled */
374 __u32 log_passwd; /* 1 = enabled, 0 = disabled */
373}; 375};
374 376
375/* audit_rule_data supports filter rules with both integer and string 377/* audit_rule_data supports filter rules with both integer and string
diff --git a/kernel/audit.c b/kernel/audit.c
index 0b084fa44b1f..21c7fa615bd3 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -49,6 +49,8 @@
49#include <linux/slab.h> 49#include <linux/slab.h>
50#include <linux/err.h> 50#include <linux/err.h>
51#include <linux/kthread.h> 51#include <linux/kthread.h>
52#include <linux/kernel.h>
53#include <linux/syscalls.h>
52 54
53#include <linux/audit.h> 55#include <linux/audit.h>
54 56
@@ -265,7 +267,6 @@ void audit_log_lost(const char *message)
265} 267}
266 268
267static int audit_log_config_change(char *function_name, int new, int old, 269static int audit_log_config_change(char *function_name, int new, int old,
268 kuid_t loginuid, u32 sessionid, u32 sid,
269 int allow_changes) 270 int allow_changes)
270{ 271{
271 struct audit_buffer *ab; 272 struct audit_buffer *ab;
@@ -274,29 +275,17 @@ static int audit_log_config_change(char *function_name, int new, int old,
274 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); 275 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
275 if (unlikely(!ab)) 276 if (unlikely(!ab))
276 return rc; 277 return rc;
277 audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new, 278 audit_log_format(ab, "%s=%d old=%d", function_name, new, old);
278 old, from_kuid(&init_user_ns, loginuid), sessionid); 279 audit_log_session_info(ab);
279 if (sid) { 280 rc = audit_log_task_context(ab);
280 char *ctx = NULL; 281 if (rc)
281 u32 len; 282 allow_changes = 0; /* Something weird, deny request */
282
283 rc = security_secid_to_secctx(sid, &ctx, &len);
284 if (rc) {
285 audit_log_format(ab, " sid=%u", sid);
286 allow_changes = 0; /* Something weird, deny request */
287 } else {
288 audit_log_format(ab, " subj=%s", ctx);
289 security_release_secctx(ctx, len);
290 }
291 }
292 audit_log_format(ab, " res=%d", allow_changes); 283 audit_log_format(ab, " res=%d", allow_changes);
293 audit_log_end(ab); 284 audit_log_end(ab);
294 return rc; 285 return rc;
295} 286}
296 287
297static int audit_do_config_change(char *function_name, int *to_change, 288static int audit_do_config_change(char *function_name, int *to_change, int new)
298 int new, kuid_t loginuid, u32 sessionid,
299 u32 sid)
300{ 289{
301 int allow_changes, rc = 0, old = *to_change; 290 int allow_changes, rc = 0, old = *to_change;
302 291
@@ -307,8 +296,7 @@ static int audit_do_config_change(char *function_name, int *to_change,
307 allow_changes = 1; 296 allow_changes = 1;
308 297
309 if (audit_enabled != AUDIT_OFF) { 298 if (audit_enabled != AUDIT_OFF) {
310 rc = audit_log_config_change(function_name, new, old, loginuid, 299 rc = audit_log_config_change(function_name, new, old, allow_changes);
311 sessionid, sid, allow_changes);
312 if (rc) 300 if (rc)
313 allow_changes = 0; 301 allow_changes = 0;
314 } 302 }
@@ -322,44 +310,37 @@ static int audit_do_config_change(char *function_name, int *to_change,
322 return rc; 310 return rc;
323} 311}
324 312
325static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid, 313static int audit_set_rate_limit(int limit)
326 u32 sid)
327{ 314{
328 return audit_do_config_change("audit_rate_limit", &audit_rate_limit, 315 return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
329 limit, loginuid, sessionid, sid);
330} 316}
331 317
332static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid, 318static int audit_set_backlog_limit(int limit)
333 u32 sid)
334{ 319{
335 return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, 320 return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
336 limit, loginuid, sessionid, sid);
337} 321}
338 322
339static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid) 323static int audit_set_enabled(int state)
340{ 324{
341 int rc; 325 int rc;
342 if (state < AUDIT_OFF || state > AUDIT_LOCKED) 326 if (state < AUDIT_OFF || state > AUDIT_LOCKED)
343 return -EINVAL; 327 return -EINVAL;
344 328
345 rc = audit_do_config_change("audit_enabled", &audit_enabled, state, 329 rc = audit_do_config_change("audit_enabled", &audit_enabled, state);
346 loginuid, sessionid, sid);
347
348 if (!rc) 330 if (!rc)
349 audit_ever_enabled |= !!state; 331 audit_ever_enabled |= !!state;
350 332
351 return rc; 333 return rc;
352} 334}
353 335
354static int audit_set_failure(int state, kuid_t loginuid, u32 sessionid, u32 sid) 336static int audit_set_failure(int state)
355{ 337{
356 if (state != AUDIT_FAIL_SILENT 338 if (state != AUDIT_FAIL_SILENT
357 && state != AUDIT_FAIL_PRINTK 339 && state != AUDIT_FAIL_PRINTK
358 && state != AUDIT_FAIL_PANIC) 340 && state != AUDIT_FAIL_PANIC)
359 return -EINVAL; 341 return -EINVAL;
360 342
361 return audit_do_config_change("audit_failure", &audit_failure, state, 343 return audit_do_config_change("audit_failure", &audit_failure, state);
362 loginuid, sessionid, sid);
363} 344}
364 345
365/* 346/*
@@ -417,34 +398,53 @@ static void kauditd_send_skb(struct sk_buff *skb)
417 consume_skb(skb); 398 consume_skb(skb);
418} 399}
419 400
420static int kauditd_thread(void *dummy) 401/*
402 * flush_hold_queue - empty the hold queue if auditd appears
403 *
404 * If auditd just started, drain the queue of messages already
405 * sent to syslog/printk. Remember loss here is ok. We already
406 * called audit_log_lost() if it didn't go out normally. so the
407 * race between the skb_dequeue and the next check for audit_pid
408 * doesn't matter.
409 *
410 * If you ever find kauditd to be too slow we can get a perf win
411 * by doing our own locking and keeping better track if there
412 * are messages in this queue. I don't see the need now, but
413 * in 5 years when I want to play with this again I'll see this
414 * note and still have no friggin idea what i'm thinking today.
415 */
416static void flush_hold_queue(void)
421{ 417{
422 struct sk_buff *skb; 418 struct sk_buff *skb;
423 419
420 if (!audit_default || !audit_pid)
421 return;
422
423 skb = skb_dequeue(&audit_skb_hold_queue);
424 if (likely(!skb))
425 return;
426
427 while (skb && audit_pid) {
428 kauditd_send_skb(skb);
429 skb = skb_dequeue(&audit_skb_hold_queue);
430 }
431
432 /*
433 * if auditd just disappeared but we
434 * dequeued an skb we need to drop ref
435 */
436 if (skb)
437 consume_skb(skb);
438}
439
440static int kauditd_thread(void *dummy)
441{
424 set_freezable(); 442 set_freezable();
425 while (!kthread_should_stop()) { 443 while (!kthread_should_stop()) {
426 /* 444 struct sk_buff *skb;
427 * if auditd just started drain the queue of messages already 445 DECLARE_WAITQUEUE(wait, current);
428 * sent to syslog/printk. remember loss here is ok. we already 446
429 * called audit_log_lost() if it didn't go out normally. so the 447 flush_hold_queue();
430 * race between the skb_dequeue and the next check for audit_pid
431 * doesn't matter.
432 *
433 * if you ever find kauditd to be too slow we can get a perf win
434 * by doing our own locking and keeping better track if there
435 * are messages in this queue. I don't see the need now, but
436 * in 5 years when I want to play with this again I'll see this
437 * note and still have no friggin idea what i'm thinking today.
438 */
439 if (audit_default && audit_pid) {
440 skb = skb_dequeue(&audit_skb_hold_queue);
441 if (unlikely(skb)) {
442 while (skb && audit_pid) {
443 kauditd_send_skb(skb);
444 skb = skb_dequeue(&audit_skb_hold_queue);
445 }
446 }
447 }
448 448
449 skb = skb_dequeue(&audit_skb_queue); 449 skb = skb_dequeue(&audit_skb_queue);
450 wake_up(&audit_backlog_wait); 450 wake_up(&audit_backlog_wait);
@@ -453,19 +453,18 @@ static int kauditd_thread(void *dummy)
453 kauditd_send_skb(skb); 453 kauditd_send_skb(skb);
454 else 454 else
455 audit_printk_skb(skb); 455 audit_printk_skb(skb);
456 } else { 456 continue;
457 DECLARE_WAITQUEUE(wait, current); 457 }
458 set_current_state(TASK_INTERRUPTIBLE); 458 set_current_state(TASK_INTERRUPTIBLE);
459 add_wait_queue(&kauditd_wait, &wait); 459 add_wait_queue(&kauditd_wait, &wait);
460
461 if (!skb_queue_len(&audit_skb_queue)) {
462 try_to_freeze();
463 schedule();
464 }
465 460
466 __set_current_state(TASK_RUNNING); 461 if (!skb_queue_len(&audit_skb_queue)) {
467 remove_wait_queue(&kauditd_wait, &wait); 462 try_to_freeze();
463 schedule();
468 } 464 }
465
466 __set_current_state(TASK_RUNNING);
467 remove_wait_queue(&kauditd_wait, &wait);
469 } 468 }
470 return 0; 469 return 0;
471} 470}
@@ -579,13 +578,14 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
579 return -EPERM; 578 return -EPERM;
580 579
581 switch (msg_type) { 580 switch (msg_type) {
582 case AUDIT_GET:
583 case AUDIT_LIST: 581 case AUDIT_LIST:
584 case AUDIT_LIST_RULES:
585 case AUDIT_SET:
586 case AUDIT_ADD: 582 case AUDIT_ADD:
587 case AUDIT_ADD_RULE:
588 case AUDIT_DEL: 583 case AUDIT_DEL:
584 return -EOPNOTSUPP;
585 case AUDIT_GET:
586 case AUDIT_SET:
587 case AUDIT_LIST_RULES:
588 case AUDIT_ADD_RULE:
589 case AUDIT_DEL_RULE: 589 case AUDIT_DEL_RULE:
590 case AUDIT_SIGNAL_INFO: 590 case AUDIT_SIGNAL_INFO:
591 case AUDIT_TTY_GET: 591 case AUDIT_TTY_GET:
@@ -608,12 +608,10 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
608 return err; 608 return err;
609} 609}
610 610
611static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type, 611static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
612 kuid_t auid, u32 ses, u32 sid)
613{ 612{
614 int rc = 0; 613 int rc = 0;
615 char *ctx = NULL; 614 uid_t uid = from_kuid(&init_user_ns, current_uid());
616 u32 len;
617 615
618 if (!audit_enabled) { 616 if (!audit_enabled) {
619 *ab = NULL; 617 *ab = NULL;
@@ -623,33 +621,21 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
623 *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); 621 *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
624 if (unlikely(!*ab)) 622 if (unlikely(!*ab))
625 return rc; 623 return rc;
626 audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u", 624 audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
627 task_tgid_vnr(current), 625 audit_log_session_info(*ab);
628 from_kuid(&init_user_ns, current_uid()), 626 audit_log_task_context(*ab);
629 from_kuid(&init_user_ns, auid), ses);
630 if (sid) {
631 rc = security_secid_to_secctx(sid, &ctx, &len);
632 if (rc)
633 audit_log_format(*ab, " ssid=%u", sid);
634 else {
635 audit_log_format(*ab, " subj=%s", ctx);
636 security_release_secctx(ctx, len);
637 }
638 }
639 627
640 return rc; 628 return rc;
641} 629}
642 630
643static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 631static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
644{ 632{
645 u32 seq, sid; 633 u32 seq;
646 void *data; 634 void *data;
647 struct audit_status *status_get, status_set; 635 struct audit_status *status_get, status_set;
648 int err; 636 int err;
649 struct audit_buffer *ab; 637 struct audit_buffer *ab;
650 u16 msg_type = nlh->nlmsg_type; 638 u16 msg_type = nlh->nlmsg_type;
651 kuid_t loginuid; /* loginuid of sender */
652 u32 sessionid;
653 struct audit_sig_info *sig_data; 639 struct audit_sig_info *sig_data;
654 char *ctx = NULL; 640 char *ctx = NULL;
655 u32 len; 641 u32 len;
@@ -668,9 +654,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
668 return err; 654 return err;
669 } 655 }
670 } 656 }
671 loginuid = audit_get_loginuid(current);
672 sessionid = audit_get_sessionid(current);
673 security_task_getsecid(current, &sid);
674 seq = nlh->nlmsg_seq; 657 seq = nlh->nlmsg_seq;
675 data = nlmsg_data(nlh); 658 data = nlmsg_data(nlh);
676 659
@@ -691,14 +674,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
691 return -EINVAL; 674 return -EINVAL;
692 status_get = (struct audit_status *)data; 675 status_get = (struct audit_status *)data;
693 if (status_get->mask & AUDIT_STATUS_ENABLED) { 676 if (status_get->mask & AUDIT_STATUS_ENABLED) {
694 err = audit_set_enabled(status_get->enabled, 677 err = audit_set_enabled(status_get->enabled);
695 loginuid, sessionid, sid);
696 if (err < 0) 678 if (err < 0)
697 return err; 679 return err;
698 } 680 }
699 if (status_get->mask & AUDIT_STATUS_FAILURE) { 681 if (status_get->mask & AUDIT_STATUS_FAILURE) {
700 err = audit_set_failure(status_get->failure, 682 err = audit_set_failure(status_get->failure);
701 loginuid, sessionid, sid);
702 if (err < 0) 683 if (err < 0)
703 return err; 684 return err;
704 } 685 }
@@ -706,22 +687,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
706 int new_pid = status_get->pid; 687 int new_pid = status_get->pid;
707 688
708 if (audit_enabled != AUDIT_OFF) 689 if (audit_enabled != AUDIT_OFF)
709 audit_log_config_change("audit_pid", new_pid, 690 audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
710 audit_pid, loginuid,
711 sessionid, sid, 1);
712
713 audit_pid = new_pid; 691 audit_pid = new_pid;
714 audit_nlk_portid = NETLINK_CB(skb).portid; 692 audit_nlk_portid = NETLINK_CB(skb).portid;
715 } 693 }
716 if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) { 694 if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
717 err = audit_set_rate_limit(status_get->rate_limit, 695 err = audit_set_rate_limit(status_get->rate_limit);
718 loginuid, sessionid, sid);
719 if (err < 0) 696 if (err < 0)
720 return err; 697 return err;
721 } 698 }
722 if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) 699 if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
723 err = audit_set_backlog_limit(status_get->backlog_limit, 700 err = audit_set_backlog_limit(status_get->backlog_limit);
724 loginuid, sessionid, sid);
725 break; 701 break;
726 case AUDIT_USER: 702 case AUDIT_USER:
727 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: 703 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
@@ -729,25 +705,22 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
729 if (!audit_enabled && msg_type != AUDIT_USER_AVC) 705 if (!audit_enabled && msg_type != AUDIT_USER_AVC)
730 return 0; 706 return 0;
731 707
732 err = audit_filter_user(); 708 err = audit_filter_user(msg_type);
733 if (err == 1) { 709 if (err == 1) {
734 err = 0; 710 err = 0;
735 if (msg_type == AUDIT_USER_TTY) { 711 if (msg_type == AUDIT_USER_TTY) {
736 err = tty_audit_push_task(current, loginuid, 712 err = tty_audit_push_current();
737 sessionid);
738 if (err) 713 if (err)
739 break; 714 break;
740 } 715 }
741 audit_log_common_recv_msg(&ab, msg_type, 716 audit_log_common_recv_msg(&ab, msg_type);
742 loginuid, sessionid, sid);
743
744 if (msg_type != AUDIT_USER_TTY) 717 if (msg_type != AUDIT_USER_TTY)
745 audit_log_format(ab, " msg='%.1024s'", 718 audit_log_format(ab, " msg='%.1024s'",
746 (char *)data); 719 (char *)data);
747 else { 720 else {
748 int size; 721 int size;
749 722
750 audit_log_format(ab, " msg="); 723 audit_log_format(ab, " data=");
751 size = nlmsg_len(nlh); 724 size = nlmsg_len(nlh);
752 if (size > 0 && 725 if (size > 0 &&
753 ((unsigned char *)data)[size - 1] == '\0') 726 ((unsigned char *)data)[size - 1] == '\0')
@@ -758,50 +731,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
758 audit_log_end(ab); 731 audit_log_end(ab);
759 } 732 }
760 break; 733 break;
761 case AUDIT_ADD:
762 case AUDIT_DEL:
763 if (nlmsg_len(nlh) < sizeof(struct audit_rule))
764 return -EINVAL;
765 if (audit_enabled == AUDIT_LOCKED) {
766 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
767 loginuid, sessionid, sid);
768
769 audit_log_format(ab, " audit_enabled=%d res=0",
770 audit_enabled);
771 audit_log_end(ab);
772 return -EPERM;
773 }
774 /* fallthrough */
775 case AUDIT_LIST:
776 err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
777 seq, data, nlmsg_len(nlh),
778 loginuid, sessionid, sid);
779 break;
780 case AUDIT_ADD_RULE: 734 case AUDIT_ADD_RULE:
781 case AUDIT_DEL_RULE: 735 case AUDIT_DEL_RULE:
782 if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) 736 if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
783 return -EINVAL; 737 return -EINVAL;
784 if (audit_enabled == AUDIT_LOCKED) { 738 if (audit_enabled == AUDIT_LOCKED) {
785 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, 739 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
786 loginuid, sessionid, sid); 740 audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled);
787
788 audit_log_format(ab, " audit_enabled=%d res=0",
789 audit_enabled);
790 audit_log_end(ab); 741 audit_log_end(ab);
791 return -EPERM; 742 return -EPERM;
792 } 743 }
793 /* fallthrough */ 744 /* fallthrough */
794 case AUDIT_LIST_RULES: 745 case AUDIT_LIST_RULES:
795 err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid, 746 err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
796 seq, data, nlmsg_len(nlh), 747 seq, data, nlmsg_len(nlh));
797 loginuid, sessionid, sid);
798 break; 748 break;
799 case AUDIT_TRIM: 749 case AUDIT_TRIM:
800 audit_trim_trees(); 750 audit_trim_trees();
801 751 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
802 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
803 loginuid, sessionid, sid);
804
805 audit_log_format(ab, " op=trim res=1"); 752 audit_log_format(ab, " op=trim res=1");
806 audit_log_end(ab); 753 audit_log_end(ab);
807 break; 754 break;
@@ -831,8 +778,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
831 /* OK, here comes... */ 778 /* OK, here comes... */
832 err = audit_tag_tree(old, new); 779 err = audit_tag_tree(old, new);
833 780
834 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, 781 audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
835 loginuid, sessionid, sid);
836 782
837 audit_log_format(ab, " op=make_equiv old="); 783 audit_log_format(ab, " op=make_equiv old=");
838 audit_log_untrustedstring(ab, old); 784 audit_log_untrustedstring(ab, old);
@@ -871,27 +817,30 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
871 struct audit_tty_status s; 817 struct audit_tty_status s;
872 struct task_struct *tsk = current; 818 struct task_struct *tsk = current;
873 819
874 spin_lock_irq(&tsk->sighand->siglock); 820 spin_lock(&tsk->sighand->siglock);
875 s.enabled = tsk->signal->audit_tty != 0; 821 s.enabled = tsk->signal->audit_tty != 0;
876 spin_unlock_irq(&tsk->sighand->siglock); 822 s.log_passwd = tsk->signal->audit_tty_log_passwd;
823 spin_unlock(&tsk->sighand->siglock);
877 824
878 audit_send_reply(NETLINK_CB(skb).portid, seq, 825 audit_send_reply(NETLINK_CB(skb).portid, seq,
879 AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); 826 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
880 break; 827 break;
881 } 828 }
882 case AUDIT_TTY_SET: { 829 case AUDIT_TTY_SET: {
883 struct audit_tty_status *s; 830 struct audit_tty_status s;
884 struct task_struct *tsk = current; 831 struct task_struct *tsk = current;
885 832
886 if (nlh->nlmsg_len < sizeof(struct audit_tty_status)) 833 memset(&s, 0, sizeof(s));
887 return -EINVAL; 834 /* guard against past and future API changes */
888 s = data; 835 memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
889 if (s->enabled != 0 && s->enabled != 1) 836 if ((s.enabled != 0 && s.enabled != 1) ||
837 (s.log_passwd != 0 && s.log_passwd != 1))
890 return -EINVAL; 838 return -EINVAL;
891 839
892 spin_lock_irq(&tsk->sighand->siglock); 840 spin_lock(&tsk->sighand->siglock);
893 tsk->signal->audit_tty = s->enabled != 0; 841 tsk->signal->audit_tty = s.enabled;
894 spin_unlock_irq(&tsk->sighand->siglock); 842 tsk->signal->audit_tty_log_passwd = s.log_passwd;
843 spin_unlock(&tsk->sighand->siglock);
895 break; 844 break;
896 } 845 }
897 default: 846 default:
@@ -1434,6 +1383,14 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
1434 kfree(pathname); 1383 kfree(pathname);
1435} 1384}
1436 1385
1386void audit_log_session_info(struct audit_buffer *ab)
1387{
1388 u32 sessionid = audit_get_sessionid(current);
1389 uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
1390
1391 audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid);
1392}
1393
1437void audit_log_key(struct audit_buffer *ab, char *key) 1394void audit_log_key(struct audit_buffer *ab, char *key)
1438{ 1395{
1439 audit_log_format(ab, " key="); 1396 audit_log_format(ab, " key=");
@@ -1443,6 +1400,224 @@ void audit_log_key(struct audit_buffer *ab, char *key)
1443 audit_log_format(ab, "(null)"); 1400 audit_log_format(ab, "(null)");
1444} 1401}
1445 1402
1403void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
1404{
1405 int i;
1406
1407 audit_log_format(ab, " %s=", prefix);
1408 CAP_FOR_EACH_U32(i) {
1409 audit_log_format(ab, "%08x",
1410 cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
1411 }
1412}
1413
1414void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
1415{
1416 kernel_cap_t *perm = &name->fcap.permitted;
1417 kernel_cap_t *inh = &name->fcap.inheritable;
1418 int log = 0;
1419
1420 if (!cap_isclear(*perm)) {
1421 audit_log_cap(ab, "cap_fp", perm);
1422 log = 1;
1423 }
1424 if (!cap_isclear(*inh)) {
1425 audit_log_cap(ab, "cap_fi", inh);
1426 log = 1;
1427 }
1428
1429 if (log)
1430 audit_log_format(ab, " cap_fe=%d cap_fver=%x",
1431 name->fcap.fE, name->fcap_ver);
1432}
1433
1434static inline int audit_copy_fcaps(struct audit_names *name,
1435 const struct dentry *dentry)
1436{
1437 struct cpu_vfs_cap_data caps;
1438 int rc;
1439
1440 if (!dentry)
1441 return 0;
1442
1443 rc = get_vfs_caps_from_disk(dentry, &caps);
1444 if (rc)
1445 return rc;
1446
1447 name->fcap.permitted = caps.permitted;
1448 name->fcap.inheritable = caps.inheritable;
1449 name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
1450 name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >>
1451 VFS_CAP_REVISION_SHIFT;
1452
1453 return 0;
1454}
1455
1456/* Copy inode data into an audit_names. */
1457void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
1458 const struct inode *inode)
1459{
1460 name->ino = inode->i_ino;
1461 name->dev = inode->i_sb->s_dev;
1462 name->mode = inode->i_mode;
1463 name->uid = inode->i_uid;
1464 name->gid = inode->i_gid;
1465 name->rdev = inode->i_rdev;
1466 security_inode_getsecid(inode, &name->osid);
1467 audit_copy_fcaps(name, dentry);
1468}
1469
1470/**
1471 * audit_log_name - produce AUDIT_PATH record from struct audit_names
1472 * @context: audit_context for the task
1473 * @n: audit_names structure with reportable details
1474 * @path: optional path to report instead of audit_names->name
1475 * @record_num: record number to report when handling a list of names
1476 * @call_panic: optional pointer to int that will be updated if secid fails
1477 */
1478void audit_log_name(struct audit_context *context, struct audit_names *n,
1479 struct path *path, int record_num, int *call_panic)
1480{
1481 struct audit_buffer *ab;
1482 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
1483 if (!ab)
1484 return;
1485
1486 audit_log_format(ab, "item=%d", record_num);
1487
1488 if (path)
1489 audit_log_d_path(ab, " name=", path);
1490 else if (n->name) {
1491 switch (n->name_len) {
1492 case AUDIT_NAME_FULL:
1493 /* log the full path */
1494 audit_log_format(ab, " name=");
1495 audit_log_untrustedstring(ab, n->name->name);
1496 break;
1497 case 0:
1498 /* name was specified as a relative path and the
1499 * directory component is the cwd */
1500 audit_log_d_path(ab, " name=", &context->pwd);
1501 break;
1502 default:
1503 /* log the name's directory component */
1504 audit_log_format(ab, " name=");
1505 audit_log_n_untrustedstring(ab, n->name->name,
1506 n->name_len);
1507 }
1508 } else
1509 audit_log_format(ab, " name=(null)");
1510
1511 if (n->ino != (unsigned long)-1) {
1512 audit_log_format(ab, " inode=%lu"
1513 " dev=%02x:%02x mode=%#ho"
1514 " ouid=%u ogid=%u rdev=%02x:%02x",
1515 n->ino,
1516 MAJOR(n->dev),
1517 MINOR(n->dev),
1518 n->mode,
1519 from_kuid(&init_user_ns, n->uid),
1520 from_kgid(&init_user_ns, n->gid),
1521 MAJOR(n->rdev),
1522 MINOR(n->rdev));
1523 }
1524 if (n->osid != 0) {
1525 char *ctx = NULL;
1526 u32 len;
1527 if (security_secid_to_secctx(
1528 n->osid, &ctx, &len)) {
1529 audit_log_format(ab, " osid=%u", n->osid);
1530 if (call_panic)
1531 *call_panic = 2;
1532 } else {
1533 audit_log_format(ab, " obj=%s", ctx);
1534 security_release_secctx(ctx, len);
1535 }
1536 }
1537
1538 audit_log_fcaps(ab, n);
1539 audit_log_end(ab);
1540}
1541
1542int audit_log_task_context(struct audit_buffer *ab)
1543{
1544 char *ctx = NULL;
1545 unsigned len;
1546 int error;
1547 u32 sid;
1548
1549 security_task_getsecid(current, &sid);
1550 if (!sid)
1551 return 0;
1552
1553 error = security_secid_to_secctx(sid, &ctx, &len);
1554 if (error) {
1555 if (error != -EINVAL)
1556 goto error_path;
1557 return 0;
1558 }
1559
1560 audit_log_format(ab, " subj=%s", ctx);
1561 security_release_secctx(ctx, len);
1562 return 0;
1563
1564error_path:
1565 audit_panic("error in audit_log_task_context");
1566 return error;
1567}
1568EXPORT_SYMBOL(audit_log_task_context);
1569
1570void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
1571{
1572 const struct cred *cred;
1573 char name[sizeof(tsk->comm)];
1574 struct mm_struct *mm = tsk->mm;
1575 char *tty;
1576
1577 if (!ab)
1578 return;
1579
1580 /* tsk == current */
1581 cred = current_cred();
1582
1583 spin_lock_irq(&tsk->sighand->siglock);
1584 if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
1585 tty = tsk->signal->tty->name;
1586 else
1587 tty = "(none)";
1588 spin_unlock_irq(&tsk->sighand->siglock);
1589
1590 audit_log_format(ab,
1591 " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
1592 " euid=%u suid=%u fsuid=%u"
1593 " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
1594 sys_getppid(),
1595 tsk->pid,
1596 from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
1597 from_kuid(&init_user_ns, cred->uid),
1598 from_kgid(&init_user_ns, cred->gid),
1599 from_kuid(&init_user_ns, cred->euid),
1600 from_kuid(&init_user_ns, cred->suid),
1601 from_kuid(&init_user_ns, cred->fsuid),
1602 from_kgid(&init_user_ns, cred->egid),
1603 from_kgid(&init_user_ns, cred->sgid),
1604 from_kgid(&init_user_ns, cred->fsgid),
1605 audit_get_sessionid(tsk), tty);
1606
1607 get_task_comm(name, tsk);
1608 audit_log_format(ab, " comm=");
1609 audit_log_untrustedstring(ab, name);
1610
1611 if (mm) {
1612 down_read(&mm->mmap_sem);
1613 if (mm->exe_file)
1614 audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
1615 up_read(&mm->mmap_sem);
1616 }
1617 audit_log_task_context(ab);
1618}
1619EXPORT_SYMBOL(audit_log_task_info);
1620
1446/** 1621/**
1447 * audit_log_link_denied - report a link restriction denial 1622 * audit_log_link_denied - report a link restriction denial
1448 * @operation: specific link opreation 1623 * @operation: specific link opreation
@@ -1451,19 +1626,28 @@ void audit_log_key(struct audit_buffer *ab, char *key)
1451void audit_log_link_denied(const char *operation, struct path *link) 1626void audit_log_link_denied(const char *operation, struct path *link)
1452{ 1627{
1453 struct audit_buffer *ab; 1628 struct audit_buffer *ab;
1629 struct audit_names *name;
1630
1631 name = kzalloc(sizeof(*name), GFP_NOFS);
1632 if (!name)
1633 return;
1454 1634
1635 /* Generate AUDIT_ANOM_LINK with subject, operation, outcome. */
1455 ab = audit_log_start(current->audit_context, GFP_KERNEL, 1636 ab = audit_log_start(current->audit_context, GFP_KERNEL,
1456 AUDIT_ANOM_LINK); 1637 AUDIT_ANOM_LINK);
1457 if (!ab) 1638 if (!ab)
1458 return; 1639 goto out;
1459 audit_log_format(ab, "op=%s action=denied", operation); 1640 audit_log_format(ab, "op=%s", operation);
1460 audit_log_format(ab, " pid=%d comm=", current->pid); 1641 audit_log_task_info(ab, current);
1461 audit_log_untrustedstring(ab, current->comm); 1642 audit_log_format(ab, " res=0");
1462 audit_log_d_path(ab, " path=", link);
1463 audit_log_format(ab, " dev=");
1464 audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id);
1465 audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino);
1466 audit_log_end(ab); 1643 audit_log_end(ab);
1644
1645 /* Generate AUDIT_PATH record with object. */
1646 name->type = AUDIT_TYPE_NORMAL;
1647 audit_copy_inode(name, link->dentry, link->dentry->d_inode);
1648 audit_log_name(current->audit_context, name, link, 0, NULL);
1649out:
1650 kfree(name);
1467} 1651}
1468 1652
1469/** 1653/**
diff --git a/kernel/audit.h b/kernel/audit.h
index 11468d99dad0..1c95131ef760 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -22,6 +22,7 @@
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/audit.h> 23#include <linux/audit.h>
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <uapi/linux/mqueue.h>
25 26
26/* 0 = no checking 27/* 0 = no checking
27 1 = put_count checking 28 1 = put_count checking
@@ -29,6 +30,11 @@
29*/ 30*/
30#define AUDIT_DEBUG 0 31#define AUDIT_DEBUG 0
31 32
33/* AUDIT_NAMES is the number of slots we reserve in the audit_context
34 * for saving names from getname(). If we get more names we will allocate
35 * a name dynamically and also add those to the list anchored by names_list. */
36#define AUDIT_NAMES 5
37
32/* At task start time, the audit_state is set in the audit_context using 38/* At task start time, the audit_state is set in the audit_context using
33 a per-task filter. At syscall entry, the audit_state is augmented by 39 a per-task filter. At syscall entry, the audit_state is augmented by
34 the syscall filter. */ 40 the syscall filter. */
@@ -59,8 +65,158 @@ struct audit_entry {
59 struct audit_krule rule; 65 struct audit_krule rule;
60}; 66};
61 67
68struct audit_cap_data {
69 kernel_cap_t permitted;
70 kernel_cap_t inheritable;
71 union {
72 unsigned int fE; /* effective bit of file cap */
73 kernel_cap_t effective; /* effective set of process */
74 };
75};
76
77/* When fs/namei.c:getname() is called, we store the pointer in name and
78 * we don't let putname() free it (instead we free all of the saved
79 * pointers at syscall exit time).
80 *
81 * Further, in fs/namei.c:path_lookup() we store the inode and device.
82 */
83struct audit_names {
84 struct list_head list; /* audit_context->names_list */
85
86 struct filename *name;
87 int name_len; /* number of chars to log */
88 bool name_put; /* call __putname()? */
89
90 unsigned long ino;
91 dev_t dev;
92 umode_t mode;
93 kuid_t uid;
94 kgid_t gid;
95 dev_t rdev;
96 u32 osid;
97 struct audit_cap_data fcap;
98 unsigned int fcap_ver;
99 unsigned char type; /* record type */
100 /*
101 * This was an allocated audit_names and not from the array of
102 * names allocated in the task audit context. Thus this name
103 * should be freed on syscall exit.
104 */
105 bool should_free;
106};
107
108/* The per-task audit context. */
109struct audit_context {
110 int dummy; /* must be the first element */
111 int in_syscall; /* 1 if task is in a syscall */
112 enum audit_state state, current_state;
113 unsigned int serial; /* serial number for record */
114 int major; /* syscall number */
115 struct timespec ctime; /* time of syscall entry */
116 unsigned long argv[4]; /* syscall arguments */
117 long return_code;/* syscall return code */
118 u64 prio;
119 int return_valid; /* return code is valid */
120 /*
121 * The names_list is the list of all audit_names collected during this
122 * syscall. The first AUDIT_NAMES entries in the names_list will
123 * actually be from the preallocated_names array for performance
124 * reasons. Except during allocation they should never be referenced
125 * through the preallocated_names array and should only be found/used
126 * by running the names_list.
127 */
128 struct audit_names preallocated_names[AUDIT_NAMES];
129 int name_count; /* total records in names_list */
130 struct list_head names_list; /* struct audit_names->list anchor */
131 char *filterkey; /* key for rule that triggered record */
132 struct path pwd;
133 struct audit_aux_data *aux;
134 struct audit_aux_data *aux_pids;
135 struct sockaddr_storage *sockaddr;
136 size_t sockaddr_len;
137 /* Save things to print about task_struct */
138 pid_t pid, ppid;
139 kuid_t uid, euid, suid, fsuid;
140 kgid_t gid, egid, sgid, fsgid;
141 unsigned long personality;
142 int arch;
143
144 pid_t target_pid;
145 kuid_t target_auid;
146 kuid_t target_uid;
147 unsigned int target_sessionid;
148 u32 target_sid;
149 char target_comm[TASK_COMM_LEN];
150
151 struct audit_tree_refs *trees, *first_trees;
152 struct list_head killed_trees;
153 int tree_count;
154
155 int type;
156 union {
157 struct {
158 int nargs;
159 long args[6];
160 } socketcall;
161 struct {
162 kuid_t uid;
163 kgid_t gid;
164 umode_t mode;
165 u32 osid;
166 int has_perm;
167 uid_t perm_uid;
168 gid_t perm_gid;
169 umode_t perm_mode;
170 unsigned long qbytes;
171 } ipc;
172 struct {
173 mqd_t mqdes;
174 struct mq_attr mqstat;
175 } mq_getsetattr;
176 struct {
177 mqd_t mqdes;
178 int sigev_signo;
179 } mq_notify;
180 struct {
181 mqd_t mqdes;
182 size_t msg_len;
183 unsigned int msg_prio;
184 struct timespec abs_timeout;
185 } mq_sendrecv;
186 struct {
187 int oflag;
188 umode_t mode;
189 struct mq_attr attr;
190 } mq_open;
191 struct {
192 pid_t pid;
193 struct audit_cap_data cap;
194 } capset;
195 struct {
196 int fd;
197 int flags;
198 } mmap;
199 };
200 int fds[2];
201
202#if AUDIT_DEBUG
203 int put_count;
204 int ino_count;
205#endif
206};
207
62extern int audit_ever_enabled; 208extern int audit_ever_enabled;
63 209
210extern void audit_copy_inode(struct audit_names *name,
211 const struct dentry *dentry,
212 const struct inode *inode);
213extern void audit_log_cap(struct audit_buffer *ab, char *prefix,
214 kernel_cap_t *cap);
215extern void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name);
216extern void audit_log_name(struct audit_context *context,
217 struct audit_names *n, struct path *path,
218 int record_num, int *call_panic);
219
64extern int audit_pid; 220extern int audit_pid;
65 221
66#define AUDIT_INODE_BUCKETS 32 222#define AUDIT_INODE_BUCKETS 32
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 267436826c3b..83a2970295d1 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -310,121 +310,83 @@ static u32 audit_to_op(u32 op)
310 return n; 310 return n;
311} 311}
312 312
313 313/* check if an audit field is valid */
314/* Translate struct audit_rule to kernel's rule respresentation. 314static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
315 * Exists for backward compatibility with userspace. */
316static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
317{ 315{
318 struct audit_entry *entry; 316 switch(f->type) {
319 int err = 0; 317 case AUDIT_MSGTYPE:
320 int i; 318 if (entry->rule.listnr != AUDIT_FILTER_TYPE &&
321 319 entry->rule.listnr != AUDIT_FILTER_USER)
322 entry = audit_to_entry_common(rule); 320 return -EINVAL;
323 if (IS_ERR(entry)) 321 break;
324 goto exit_nofree; 322 };
325
326 for (i = 0; i < rule->field_count; i++) {
327 struct audit_field *f = &entry->rule.fields[i];
328 u32 n;
329
330 n = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
331
332 /* Support for legacy operators where
333 * AUDIT_NEGATE bit signifies != and otherwise assumes == */
334 if (n & AUDIT_NEGATE)
335 f->op = Audit_not_equal;
336 else if (!n)
337 f->op = Audit_equal;
338 else
339 f->op = audit_to_op(n);
340
341 entry->rule.vers_ops = (n & AUDIT_OPERATORS) ? 2 : 1;
342
343 f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
344 f->val = rule->values[i];
345 f->uid = INVALID_UID;
346 f->gid = INVALID_GID;
347
348 err = -EINVAL;
349 if (f->op == Audit_bad)
350 goto exit_free;
351
352 switch(f->type) {
353 default:
354 goto exit_free;
355 case AUDIT_UID:
356 case AUDIT_EUID:
357 case AUDIT_SUID:
358 case AUDIT_FSUID:
359 case AUDIT_LOGINUID:
360 /* bit ops not implemented for uid comparisons */
361 if (f->op == Audit_bitmask || f->op == Audit_bittest)
362 goto exit_free;
363
364 f->uid = make_kuid(current_user_ns(), f->val);
365 if (!uid_valid(f->uid))
366 goto exit_free;
367 break;
368 case AUDIT_GID:
369 case AUDIT_EGID:
370 case AUDIT_SGID:
371 case AUDIT_FSGID:
372 /* bit ops not implemented for gid comparisons */
373 if (f->op == Audit_bitmask || f->op == Audit_bittest)
374 goto exit_free;
375
376 f->gid = make_kgid(current_user_ns(), f->val);
377 if (!gid_valid(f->gid))
378 goto exit_free;
379 break;
380 case AUDIT_PID:
381 case AUDIT_PERS:
382 case AUDIT_MSGTYPE:
383 case AUDIT_PPID:
384 case AUDIT_DEVMAJOR:
385 case AUDIT_DEVMINOR:
386 case AUDIT_EXIT:
387 case AUDIT_SUCCESS:
388 /* bit ops are only useful on syscall args */
389 if (f->op == Audit_bitmask || f->op == Audit_bittest)
390 goto exit_free;
391 break;
392 case AUDIT_ARG0:
393 case AUDIT_ARG1:
394 case AUDIT_ARG2:
395 case AUDIT_ARG3:
396 break;
397 /* arch is only allowed to be = or != */
398 case AUDIT_ARCH:
399 if (f->op != Audit_not_equal && f->op != Audit_equal)
400 goto exit_free;
401 entry->rule.arch_f = f;
402 break;
403 case AUDIT_PERM:
404 if (f->val & ~15)
405 goto exit_free;
406 break;
407 case AUDIT_FILETYPE:
408 if (f->val & ~S_IFMT)
409 goto exit_free;
410 break;
411 case AUDIT_INODE:
412 err = audit_to_inode(&entry->rule, f);
413 if (err)
414 goto exit_free;
415 break;
416 }
417 }
418
419 if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)
420 entry->rule.inode_f = NULL;
421
422exit_nofree:
423 return entry;
424 323
425exit_free: 324 switch(f->type) {
426 audit_free_rule(entry); 325 default:
427 return ERR_PTR(err); 326 return -EINVAL;
327 case AUDIT_UID:
328 case AUDIT_EUID:
329 case AUDIT_SUID:
330 case AUDIT_FSUID:
331 case AUDIT_LOGINUID:
332 case AUDIT_OBJ_UID:
333 case AUDIT_GID:
334 case AUDIT_EGID:
335 case AUDIT_SGID:
336 case AUDIT_FSGID:
337 case AUDIT_OBJ_GID:
338 case AUDIT_PID:
339 case AUDIT_PERS:
340 case AUDIT_MSGTYPE:
341 case AUDIT_PPID:
342 case AUDIT_DEVMAJOR:
343 case AUDIT_DEVMINOR:
344 case AUDIT_EXIT:
345 case AUDIT_SUCCESS:
346 /* bit ops are only useful on syscall args */
347 if (f->op == Audit_bitmask || f->op == Audit_bittest)
348 return -EINVAL;
349 break;
350 case AUDIT_ARG0:
351 case AUDIT_ARG1:
352 case AUDIT_ARG2:
353 case AUDIT_ARG3:
354 case AUDIT_SUBJ_USER:
355 case AUDIT_SUBJ_ROLE:
356 case AUDIT_SUBJ_TYPE:
357 case AUDIT_SUBJ_SEN:
358 case AUDIT_SUBJ_CLR:
359 case AUDIT_OBJ_USER:
360 case AUDIT_OBJ_ROLE:
361 case AUDIT_OBJ_TYPE:
362 case AUDIT_OBJ_LEV_LOW:
363 case AUDIT_OBJ_LEV_HIGH:
364 case AUDIT_WATCH:
365 case AUDIT_DIR:
366 case AUDIT_FILTERKEY:
367 break;
368 case AUDIT_LOGINUID_SET:
369 if ((f->val != 0) && (f->val != 1))
370 return -EINVAL;
371 /* FALL THROUGH */
372 case AUDIT_ARCH:
373 if (f->op != Audit_not_equal && f->op != Audit_equal)
374 return -EINVAL;
375 break;
376 case AUDIT_PERM:
377 if (f->val & ~15)
378 return -EINVAL;
379 break;
380 case AUDIT_FILETYPE:
381 if (f->val & ~S_IFMT)
382 return -EINVAL;
383 break;
384 case AUDIT_FIELD_COMPARE:
385 if (f->val > AUDIT_MAX_FIELD_COMPARE)
386 return -EINVAL;
387 break;
388 };
389 return 0;
428} 390}
429 391
430/* Translate struct audit_rule_data to kernel's rule respresentation. */ 392/* Translate struct audit_rule_data to kernel's rule respresentation. */
@@ -459,17 +421,25 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
459 f->gid = INVALID_GID; 421 f->gid = INVALID_GID;
460 f->lsm_str = NULL; 422 f->lsm_str = NULL;
461 f->lsm_rule = NULL; 423 f->lsm_rule = NULL;
462 switch(f->type) { 424
425 /* Support legacy tests for a valid loginuid */
426 if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
427 f->type = AUDIT_LOGINUID_SET;
428 f->val = 0;
429 }
430
431 err = audit_field_valid(entry, f);
432 if (err)
433 goto exit_free;
434
435 err = -EINVAL;
436 switch (f->type) {
437 case AUDIT_LOGINUID:
463 case AUDIT_UID: 438 case AUDIT_UID:
464 case AUDIT_EUID: 439 case AUDIT_EUID:
465 case AUDIT_SUID: 440 case AUDIT_SUID:
466 case AUDIT_FSUID: 441 case AUDIT_FSUID:
467 case AUDIT_LOGINUID:
468 case AUDIT_OBJ_UID: 442 case AUDIT_OBJ_UID:
469 /* bit ops not implemented for uid comparisons */
470 if (f->op == Audit_bitmask || f->op == Audit_bittest)
471 goto exit_free;
472
473 f->uid = make_kuid(current_user_ns(), f->val); 443 f->uid = make_kuid(current_user_ns(), f->val);
474 if (!uid_valid(f->uid)) 444 if (!uid_valid(f->uid))
475 goto exit_free; 445 goto exit_free;
@@ -479,27 +449,10 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
479 case AUDIT_SGID: 449 case AUDIT_SGID:
480 case AUDIT_FSGID: 450 case AUDIT_FSGID:
481 case AUDIT_OBJ_GID: 451 case AUDIT_OBJ_GID:
482 /* bit ops not implemented for gid comparisons */
483 if (f->op == Audit_bitmask || f->op == Audit_bittest)
484 goto exit_free;
485
486 f->gid = make_kgid(current_user_ns(), f->val); 452 f->gid = make_kgid(current_user_ns(), f->val);
487 if (!gid_valid(f->gid)) 453 if (!gid_valid(f->gid))
488 goto exit_free; 454 goto exit_free;
489 break; 455 break;
490 case AUDIT_PID:
491 case AUDIT_PERS:
492 case AUDIT_MSGTYPE:
493 case AUDIT_PPID:
494 case AUDIT_DEVMAJOR:
495 case AUDIT_DEVMINOR:
496 case AUDIT_EXIT:
497 case AUDIT_SUCCESS:
498 case AUDIT_ARG0:
499 case AUDIT_ARG1:
500 case AUDIT_ARG2:
501 case AUDIT_ARG3:
502 break;
503 case AUDIT_ARCH: 456 case AUDIT_ARCH:
504 entry->rule.arch_f = f; 457 entry->rule.arch_f = f;
505 break; 458 break;
@@ -570,20 +523,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
570 entry->rule.buflen += f->val; 523 entry->rule.buflen += f->val;
571 entry->rule.filterkey = str; 524 entry->rule.filterkey = str;
572 break; 525 break;
573 case AUDIT_PERM:
574 if (f->val & ~15)
575 goto exit_free;
576 break;
577 case AUDIT_FILETYPE:
578 if (f->val & ~S_IFMT)
579 goto exit_free;
580 break;
581 case AUDIT_FIELD_COMPARE:
582 if (f->val > AUDIT_MAX_FIELD_COMPARE)
583 goto exit_free;
584 break;
585 default:
586 goto exit_free;
587 } 526 }
588 } 527 }
589 528
@@ -613,36 +552,6 @@ static inline size_t audit_pack_string(void **bufp, const char *str)
613 return len; 552 return len;
614} 553}
615 554
616/* Translate kernel rule respresentation to struct audit_rule.
617 * Exists for backward compatibility with userspace. */
618static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule)
619{
620 struct audit_rule *rule;
621 int i;
622
623 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
624 if (unlikely(!rule))
625 return NULL;
626
627 rule->flags = krule->flags | krule->listnr;
628 rule->action = krule->action;
629 rule->field_count = krule->field_count;
630 for (i = 0; i < rule->field_count; i++) {
631 rule->values[i] = krule->fields[i].val;
632 rule->fields[i] = krule->fields[i].type;
633
634 if (krule->vers_ops == 1) {
635 if (krule->fields[i].op == Audit_not_equal)
636 rule->fields[i] |= AUDIT_NEGATE;
637 } else {
638 rule->fields[i] |= audit_ops[krule->fields[i].op];
639 }
640 }
641 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) rule->mask[i] = krule->mask[i];
642
643 return rule;
644}
645
646/* Translate kernel rule respresentation to struct audit_rule_data. */ 555/* Translate kernel rule respresentation to struct audit_rule_data. */
647static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) 556static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
648{ 557{
@@ -1055,35 +964,6 @@ out:
1055 return ret; 964 return ret;
1056} 965}
1057 966
1058/* List rules using struct audit_rule. Exists for backward
1059 * compatibility with userspace. */
1060static void audit_list(int pid, int seq, struct sk_buff_head *q)
1061{
1062 struct sk_buff *skb;
1063 struct audit_krule *r;
1064 int i;
1065
1066 /* This is a blocking read, so use audit_filter_mutex instead of rcu
1067 * iterator to sync with list writers. */
1068 for (i=0; i<AUDIT_NR_FILTERS; i++) {
1069 list_for_each_entry(r, &audit_rules_list[i], list) {
1070 struct audit_rule *rule;
1071
1072 rule = audit_krule_to_rule(r);
1073 if (unlikely(!rule))
1074 break;
1075 skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1,
1076 rule, sizeof(*rule));
1077 if (skb)
1078 skb_queue_tail(q, skb);
1079 kfree(rule);
1080 }
1081 }
1082 skb = audit_make_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0);
1083 if (skb)
1084 skb_queue_tail(q, skb);
1085}
1086
1087/* List rules using struct audit_rule_data. */ 967/* List rules using struct audit_rule_data. */
1088static void audit_list_rules(int pid, int seq, struct sk_buff_head *q) 968static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
1089{ 969{
@@ -1113,11 +993,11 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
1113} 993}
1114 994
1115/* Log rule additions and removals */ 995/* Log rule additions and removals */
1116static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid, 996static void audit_log_rule_change(char *action, struct audit_krule *rule, int res)
1117 char *action, struct audit_krule *rule,
1118 int res)
1119{ 997{
1120 struct audit_buffer *ab; 998 struct audit_buffer *ab;
999 uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
1000 u32 sessionid = audit_get_sessionid(current);
1121 1001
1122 if (!audit_enabled) 1002 if (!audit_enabled)
1123 return; 1003 return;
@@ -1125,18 +1005,8 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
1125 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); 1005 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
1126 if (!ab) 1006 if (!ab)
1127 return; 1007 return;
1128 audit_log_format(ab, "auid=%u ses=%u", 1008 audit_log_format(ab, "auid=%u ses=%u" ,loginuid, sessionid);
1129 from_kuid(&init_user_ns, loginuid), sessionid); 1009 audit_log_task_context(ab);
1130 if (sid) {
1131 char *ctx = NULL;
1132 u32 len;
1133 if (security_secid_to_secctx(sid, &ctx, &len))
1134 audit_log_format(ab, " ssid=%u", sid);
1135 else {
1136 audit_log_format(ab, " subj=%s", ctx);
1137 security_release_secctx(ctx, len);
1138 }
1139 }
1140 audit_log_format(ab, " op="); 1010 audit_log_format(ab, " op=");
1141 audit_log_string(ab, action); 1011 audit_log_string(ab, action);
1142 audit_log_key(ab, rule->filterkey); 1012 audit_log_key(ab, rule->filterkey);
@@ -1155,8 +1025,7 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
1155 * @sessionid: sessionid for netlink audit message 1025 * @sessionid: sessionid for netlink audit message
1156 * @sid: SE Linux Security ID of sender 1026 * @sid: SE Linux Security ID of sender
1157 */ 1027 */
1158int audit_receive_filter(int type, int pid, int seq, void *data, 1028int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
1159 size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid)
1160{ 1029{
1161 struct task_struct *tsk; 1030 struct task_struct *tsk;
1162 struct audit_netlink_list *dest; 1031 struct audit_netlink_list *dest;
@@ -1164,7 +1033,6 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
1164 struct audit_entry *entry; 1033 struct audit_entry *entry;
1165 1034
1166 switch (type) { 1035 switch (type) {
1167 case AUDIT_LIST:
1168 case AUDIT_LIST_RULES: 1036 case AUDIT_LIST_RULES:
1169 /* We can't just spew out the rules here because we might fill 1037 /* We can't just spew out the rules here because we might fill
1170 * the available socket buffer space and deadlock waiting for 1038 * the available socket buffer space and deadlock waiting for
@@ -1179,10 +1047,7 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
1179 skb_queue_head_init(&dest->q); 1047 skb_queue_head_init(&dest->q);
1180 1048
1181 mutex_lock(&audit_filter_mutex); 1049 mutex_lock(&audit_filter_mutex);
1182 if (type == AUDIT_LIST) 1050 audit_list_rules(pid, seq, &dest->q);
1183 audit_list(pid, seq, &dest->q);
1184 else
1185 audit_list_rules(pid, seq, &dest->q);
1186 mutex_unlock(&audit_filter_mutex); 1051 mutex_unlock(&audit_filter_mutex);
1187 1052
1188 tsk = kthread_run(audit_send_list, dest, "audit_send_list"); 1053 tsk = kthread_run(audit_send_list, dest, "audit_send_list");
@@ -1192,35 +1057,23 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
1192 err = PTR_ERR(tsk); 1057 err = PTR_ERR(tsk);
1193 } 1058 }
1194 break; 1059 break;
1195 case AUDIT_ADD:
1196 case AUDIT_ADD_RULE: 1060 case AUDIT_ADD_RULE:
1197 if (type == AUDIT_ADD) 1061 entry = audit_data_to_entry(data, datasz);
1198 entry = audit_rule_to_entry(data);
1199 else
1200 entry = audit_data_to_entry(data, datasz);
1201 if (IS_ERR(entry)) 1062 if (IS_ERR(entry))
1202 return PTR_ERR(entry); 1063 return PTR_ERR(entry);
1203 1064
1204 err = audit_add_rule(entry); 1065 err = audit_add_rule(entry);
1205 audit_log_rule_change(loginuid, sessionid, sid, "add rule", 1066 audit_log_rule_change("add rule", &entry->rule, !err);
1206 &entry->rule, !err);
1207
1208 if (err) 1067 if (err)
1209 audit_free_rule(entry); 1068 audit_free_rule(entry);
1210 break; 1069 break;
1211 case AUDIT_DEL:
1212 case AUDIT_DEL_RULE: 1070 case AUDIT_DEL_RULE:
1213 if (type == AUDIT_DEL) 1071 entry = audit_data_to_entry(data, datasz);
1214 entry = audit_rule_to_entry(data);
1215 else
1216 entry = audit_data_to_entry(data, datasz);
1217 if (IS_ERR(entry)) 1072 if (IS_ERR(entry))
1218 return PTR_ERR(entry); 1073 return PTR_ERR(entry);
1219 1074
1220 err = audit_del_rule(entry); 1075 err = audit_del_rule(entry);
1221 audit_log_rule_change(loginuid, sessionid, sid, "remove rule", 1076 audit_log_rule_change("remove rule", &entry->rule, !err);
1222 &entry->rule, !err);
1223
1224 audit_free_rule(entry); 1077 audit_free_rule(entry);
1225 break; 1078 break;
1226 default: 1079 default:
@@ -1358,7 +1211,7 @@ int audit_compare_dname_path(const char *dname, const char *path, int parentlen)
1358 return strncmp(p, dname, dlen); 1211 return strncmp(p, dname, dlen);
1359} 1212}
1360 1213
1361static int audit_filter_user_rules(struct audit_krule *rule, 1214static int audit_filter_user_rules(struct audit_krule *rule, int type,
1362 enum audit_state *state) 1215 enum audit_state *state)
1363{ 1216{
1364 int i; 1217 int i;
@@ -1382,6 +1235,13 @@ static int audit_filter_user_rules(struct audit_krule *rule,
1382 result = audit_uid_comparator(audit_get_loginuid(current), 1235 result = audit_uid_comparator(audit_get_loginuid(current),
1383 f->op, f->uid); 1236 f->op, f->uid);
1384 break; 1237 break;
1238 case AUDIT_LOGINUID_SET:
1239 result = audit_comparator(audit_loginuid_set(current),
1240 f->op, f->val);
1241 break;
1242 case AUDIT_MSGTYPE:
1243 result = audit_comparator(type, f->op, f->val);
1244 break;
1385 case AUDIT_SUBJ_USER: 1245 case AUDIT_SUBJ_USER:
1386 case AUDIT_SUBJ_ROLE: 1246 case AUDIT_SUBJ_ROLE:
1387 case AUDIT_SUBJ_TYPE: 1247 case AUDIT_SUBJ_TYPE:
@@ -1408,7 +1268,7 @@ static int audit_filter_user_rules(struct audit_krule *rule,
1408 return 1; 1268 return 1;
1409} 1269}
1410 1270
1411int audit_filter_user(void) 1271int audit_filter_user(int type)
1412{ 1272{
1413 enum audit_state state = AUDIT_DISABLED; 1273 enum audit_state state = AUDIT_DISABLED;
1414 struct audit_entry *e; 1274 struct audit_entry *e;
@@ -1416,7 +1276,7 @@ int audit_filter_user(void)
1416 1276
1417 rcu_read_lock(); 1277 rcu_read_lock();
1418 list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) { 1278 list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) {
1419 if (audit_filter_user_rules(&e->rule, &state)) { 1279 if (audit_filter_user_rules(&e->rule, type, &state)) {
1420 if (state == AUDIT_DISABLED) 1280 if (state == AUDIT_DISABLED)
1421 ret = 0; 1281 ret = 0;
1422 break; 1282 break;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c68229411a7c..3c8a601324a2 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -76,11 +76,6 @@
76#define AUDITSC_SUCCESS 1 76#define AUDITSC_SUCCESS 1
77#define AUDITSC_FAILURE 2 77#define AUDITSC_FAILURE 2
78 78
79/* AUDIT_NAMES is the number of slots we reserve in the audit_context
80 * for saving names from getname(). If we get more names we will allocate
81 * a name dynamically and also add those to the list anchored by names_list. */
82#define AUDIT_NAMES 5
83
84/* no execve audit message should be longer than this (userspace limits) */ 79/* no execve audit message should be longer than this (userspace limits) */
85#define MAX_EXECVE_AUDIT_LEN 7500 80#define MAX_EXECVE_AUDIT_LEN 7500
86 81
@@ -90,44 +85,6 @@ int audit_n_rules;
90/* determines whether we collect data for signals sent */ 85/* determines whether we collect data for signals sent */
91int audit_signals; 86int audit_signals;
92 87
93struct audit_cap_data {
94 kernel_cap_t permitted;
95 kernel_cap_t inheritable;
96 union {
97 unsigned int fE; /* effective bit of a file capability */
98 kernel_cap_t effective; /* effective set of a process */
99 };
100};
101
102/* When fs/namei.c:getname() is called, we store the pointer in name and
103 * we don't let putname() free it (instead we free all of the saved
104 * pointers at syscall exit time).
105 *
106 * Further, in fs/namei.c:path_lookup() we store the inode and device.
107 */
108struct audit_names {
109 struct list_head list; /* audit_context->names_list */
110 struct filename *name;
111 unsigned long ino;
112 dev_t dev;
113 umode_t mode;
114 kuid_t uid;
115 kgid_t gid;
116 dev_t rdev;
117 u32 osid;
118 struct audit_cap_data fcap;
119 unsigned int fcap_ver;
120 int name_len; /* number of name's characters to log */
121 unsigned char type; /* record type */
122 bool name_put; /* call __putname() for this name */
123 /*
124 * This was an allocated audit_names and not from the array of
125 * names allocated in the task audit context. Thus this name
126 * should be freed on syscall exit
127 */
128 bool should_free;
129};
130
131struct audit_aux_data { 88struct audit_aux_data {
132 struct audit_aux_data *next; 89 struct audit_aux_data *next;
133 int type; 90 int type;
@@ -175,106 +132,6 @@ struct audit_tree_refs {
175 struct audit_chunk *c[31]; 132 struct audit_chunk *c[31];
176}; 133};
177 134
178/* The per-task audit context. */
179struct audit_context {
180 int dummy; /* must be the first element */
181 int in_syscall; /* 1 if task is in a syscall */
182 enum audit_state state, current_state;
183 unsigned int serial; /* serial number for record */
184 int major; /* syscall number */
185 struct timespec ctime; /* time of syscall entry */
186 unsigned long argv[4]; /* syscall arguments */
187 long return_code;/* syscall return code */
188 u64 prio;
189 int return_valid; /* return code is valid */
190 /*
191 * The names_list is the list of all audit_names collected during this
192 * syscall. The first AUDIT_NAMES entries in the names_list will
193 * actually be from the preallocated_names array for performance
194 * reasons. Except during allocation they should never be referenced
195 * through the preallocated_names array and should only be found/used
196 * by running the names_list.
197 */
198 struct audit_names preallocated_names[AUDIT_NAMES];
199 int name_count; /* total records in names_list */
200 struct list_head names_list; /* anchor for struct audit_names->list */
201 char * filterkey; /* key for rule that triggered record */
202 struct path pwd;
203 struct audit_aux_data *aux;
204 struct audit_aux_data *aux_pids;
205 struct sockaddr_storage *sockaddr;
206 size_t sockaddr_len;
207 /* Save things to print about task_struct */
208 pid_t pid, ppid;
209 kuid_t uid, euid, suid, fsuid;
210 kgid_t gid, egid, sgid, fsgid;
211 unsigned long personality;
212 int arch;
213
214 pid_t target_pid;
215 kuid_t target_auid;
216 kuid_t target_uid;
217 unsigned int target_sessionid;
218 u32 target_sid;
219 char target_comm[TASK_COMM_LEN];
220
221 struct audit_tree_refs *trees, *first_trees;
222 struct list_head killed_trees;
223 int tree_count;
224
225 int type;
226 union {
227 struct {
228 int nargs;
229 long args[6];
230 } socketcall;
231 struct {
232 kuid_t uid;
233 kgid_t gid;
234 umode_t mode;
235 u32 osid;
236 int has_perm;
237 uid_t perm_uid;
238 gid_t perm_gid;
239 umode_t perm_mode;
240 unsigned long qbytes;
241 } ipc;
242 struct {
243 mqd_t mqdes;
244 struct mq_attr mqstat;
245 } mq_getsetattr;
246 struct {
247 mqd_t mqdes;
248 int sigev_signo;
249 } mq_notify;
250 struct {
251 mqd_t mqdes;
252 size_t msg_len;
253 unsigned int msg_prio;
254 struct timespec abs_timeout;
255 } mq_sendrecv;
256 struct {
257 int oflag;
258 umode_t mode;
259 struct mq_attr attr;
260 } mq_open;
261 struct {
262 pid_t pid;
263 struct audit_cap_data cap;
264 } capset;
265 struct {
266 int fd;
267 int flags;
268 } mmap;
269 };
270 int fds[2];
271
272#if AUDIT_DEBUG
273 int put_count;
274 int ino_count;
275#endif
276};
277
278static inline int open_arg(int flags, int mask) 135static inline int open_arg(int flags, int mask)
279{ 136{
280 int n = ACC_MODE(flags); 137 int n = ACC_MODE(flags);
@@ -633,9 +490,23 @@ static int audit_filter_rules(struct task_struct *tsk,
633 break; 490 break;
634 case AUDIT_GID: 491 case AUDIT_GID:
635 result = audit_gid_comparator(cred->gid, f->op, f->gid); 492 result = audit_gid_comparator(cred->gid, f->op, f->gid);
493 if (f->op == Audit_equal) {
494 if (!result)
495 result = in_group_p(f->gid);
496 } else if (f->op == Audit_not_equal) {
497 if (result)
498 result = !in_group_p(f->gid);
499 }
636 break; 500 break;
637 case AUDIT_EGID: 501 case AUDIT_EGID:
638 result = audit_gid_comparator(cred->egid, f->op, f->gid); 502 result = audit_gid_comparator(cred->egid, f->op, f->gid);
503 if (f->op == Audit_equal) {
504 if (!result)
505 result = in_egroup_p(f->gid);
506 } else if (f->op == Audit_not_equal) {
507 if (result)
508 result = !in_egroup_p(f->gid);
509 }
639 break; 510 break;
640 case AUDIT_SGID: 511 case AUDIT_SGID:
641 result = audit_gid_comparator(cred->sgid, f->op, f->gid); 512 result = audit_gid_comparator(cred->sgid, f->op, f->gid);
@@ -742,6 +613,9 @@ static int audit_filter_rules(struct task_struct *tsk,
742 if (ctx) 613 if (ctx)
743 result = audit_uid_comparator(tsk->loginuid, f->op, f->uid); 614 result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
744 break; 615 break;
616 case AUDIT_LOGINUID_SET:
617 result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
618 break;
745 case AUDIT_SUBJ_USER: 619 case AUDIT_SUBJ_USER:
746 case AUDIT_SUBJ_ROLE: 620 case AUDIT_SUBJ_ROLE:
747 case AUDIT_SUBJ_TYPE: 621 case AUDIT_SUBJ_TYPE:
@@ -987,6 +861,8 @@ static inline void audit_free_names(struct audit_context *context)
987 861
988#if AUDIT_DEBUG == 2 862#if AUDIT_DEBUG == 2
989 if (context->put_count + context->ino_count != context->name_count) { 863 if (context->put_count + context->ino_count != context->name_count) {
864 int i = 0;
865
990 printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d" 866 printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
991 " name_count=%d put_count=%d" 867 " name_count=%d put_count=%d"
992 " ino_count=%d [NOT freeing]\n", 868 " ino_count=%d [NOT freeing]\n",
@@ -995,7 +871,7 @@ static inline void audit_free_names(struct audit_context *context)
995 context->name_count, context->put_count, 871 context->name_count, context->put_count,
996 context->ino_count); 872 context->ino_count);
997 list_for_each_entry(n, &context->names_list, list) { 873 list_for_each_entry(n, &context->names_list, list) {
998 printk(KERN_ERR "names[%d] = %p = %s\n", i, 874 printk(KERN_ERR "names[%d] = %p = %s\n", i++,
999 n->name, n->name->name ?: "(null)"); 875 n->name, n->name->name ?: "(null)");
1000 } 876 }
1001 dump_stack(); 877 dump_stack();
@@ -1010,7 +886,7 @@ static inline void audit_free_names(struct audit_context *context)
1010 list_for_each_entry_safe(n, next, &context->names_list, list) { 886 list_for_each_entry_safe(n, next, &context->names_list, list) {
1011 list_del(&n->list); 887 list_del(&n->list);
1012 if (n->name && n->name_put) 888 if (n->name && n->name_put)
1013 __putname(n->name); 889 final_putname(n->name);
1014 if (n->should_free) 890 if (n->should_free)
1015 kfree(n); 891 kfree(n);
1016 } 892 }
@@ -1093,88 +969,6 @@ static inline void audit_free_context(struct audit_context *context)
1093 kfree(context); 969 kfree(context);
1094} 970}
1095 971
1096void audit_log_task_context(struct audit_buffer *ab)
1097{
1098 char *ctx = NULL;
1099 unsigned len;
1100 int error;
1101 u32 sid;
1102
1103 security_task_getsecid(current, &sid);
1104 if (!sid)
1105 return;
1106
1107 error = security_secid_to_secctx(sid, &ctx, &len);
1108 if (error) {
1109 if (error != -EINVAL)
1110 goto error_path;
1111 return;
1112 }
1113
1114 audit_log_format(ab, " subj=%s", ctx);
1115 security_release_secctx(ctx, len);
1116 return;
1117
1118error_path:
1119 audit_panic("error in audit_log_task_context");
1120 return;
1121}
1122
1123EXPORT_SYMBOL(audit_log_task_context);
1124
1125void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
1126{
1127 const struct cred *cred;
1128 char name[sizeof(tsk->comm)];
1129 struct mm_struct *mm = tsk->mm;
1130 char *tty;
1131
1132 if (!ab)
1133 return;
1134
1135 /* tsk == current */
1136 cred = current_cred();
1137
1138 spin_lock_irq(&tsk->sighand->siglock);
1139 if (tsk->signal && tsk->signal->tty)
1140 tty = tsk->signal->tty->name;
1141 else
1142 tty = "(none)";
1143 spin_unlock_irq(&tsk->sighand->siglock);
1144
1145
1146 audit_log_format(ab,
1147 " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
1148 " euid=%u suid=%u fsuid=%u"
1149 " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
1150 sys_getppid(),
1151 tsk->pid,
1152 from_kuid(&init_user_ns, tsk->loginuid),
1153 from_kuid(&init_user_ns, cred->uid),
1154 from_kgid(&init_user_ns, cred->gid),
1155 from_kuid(&init_user_ns, cred->euid),
1156 from_kuid(&init_user_ns, cred->suid),
1157 from_kuid(&init_user_ns, cred->fsuid),
1158 from_kgid(&init_user_ns, cred->egid),
1159 from_kgid(&init_user_ns, cred->sgid),
1160 from_kgid(&init_user_ns, cred->fsgid),
1161 tsk->sessionid, tty);
1162
1163 get_task_comm(name, tsk);
1164 audit_log_format(ab, " comm=");
1165 audit_log_untrustedstring(ab, name);
1166
1167 if (mm) {
1168 down_read(&mm->mmap_sem);
1169 if (mm->exe_file)
1170 audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
1171 up_read(&mm->mmap_sem);
1172 }
1173 audit_log_task_context(ab);
1174}
1175
1176EXPORT_SYMBOL(audit_log_task_info);
1177
1178static int audit_log_pid_context(struct audit_context *context, pid_t pid, 972static int audit_log_pid_context(struct audit_context *context, pid_t pid,
1179 kuid_t auid, kuid_t uid, unsigned int sessionid, 973 kuid_t auid, kuid_t uid, unsigned int sessionid,
1180 u32 sid, char *comm) 974 u32 sid, char *comm)
@@ -1191,12 +985,14 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
1191 audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, 985 audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
1192 from_kuid(&init_user_ns, auid), 986 from_kuid(&init_user_ns, auid),
1193 from_kuid(&init_user_ns, uid), sessionid); 987 from_kuid(&init_user_ns, uid), sessionid);
1194 if (security_secid_to_secctx(sid, &ctx, &len)) { 988 if (sid) {
1195 audit_log_format(ab, " obj=(none)"); 989 if (security_secid_to_secctx(sid, &ctx, &len)) {
1196 rc = 1; 990 audit_log_format(ab, " obj=(none)");
1197 } else { 991 rc = 1;
1198 audit_log_format(ab, " obj=%s", ctx); 992 } else {
1199 security_release_secctx(ctx, len); 993 audit_log_format(ab, " obj=%s", ctx);
994 security_release_secctx(ctx, len);
995 }
1200 } 996 }
1201 audit_log_format(ab, " ocomm="); 997 audit_log_format(ab, " ocomm=");
1202 audit_log_untrustedstring(ab, comm); 998 audit_log_untrustedstring(ab, comm);
@@ -1390,35 +1186,6 @@ static void audit_log_execve_info(struct audit_context *context,
1390 kfree(buf); 1186 kfree(buf);
1391} 1187}
1392 1188
1393static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
1394{
1395 int i;
1396
1397 audit_log_format(ab, " %s=", prefix);
1398 CAP_FOR_EACH_U32(i) {
1399 audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
1400 }
1401}
1402
1403static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
1404{
1405 kernel_cap_t *perm = &name->fcap.permitted;
1406 kernel_cap_t *inh = &name->fcap.inheritable;
1407 int log = 0;
1408
1409 if (!cap_isclear(*perm)) {
1410 audit_log_cap(ab, "cap_fp", perm);
1411 log = 1;
1412 }
1413 if (!cap_isclear(*inh)) {
1414 audit_log_cap(ab, "cap_fi", inh);
1415 log = 1;
1416 }
1417
1418 if (log)
1419 audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
1420}
1421
1422static void show_special(struct audit_context *context, int *call_panic) 1189static void show_special(struct audit_context *context, int *call_panic)
1423{ 1190{
1424 struct audit_buffer *ab; 1191 struct audit_buffer *ab;
@@ -1516,68 +1283,6 @@ static void show_special(struct audit_context *context, int *call_panic)
1516 audit_log_end(ab); 1283 audit_log_end(ab);
1517} 1284}
1518 1285
1519static void audit_log_name(struct audit_context *context, struct audit_names *n,
1520 int record_num, int *call_panic)
1521{
1522 struct audit_buffer *ab;
1523 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
1524 if (!ab)
1525 return; /* audit_panic has been called */
1526
1527 audit_log_format(ab, "item=%d", record_num);
1528
1529 if (n->name) {
1530 switch (n->name_len) {
1531 case AUDIT_NAME_FULL:
1532 /* log the full path */
1533 audit_log_format(ab, " name=");
1534 audit_log_untrustedstring(ab, n->name->name);
1535 break;
1536 case 0:
1537 /* name was specified as a relative path and the
1538 * directory component is the cwd */
1539 audit_log_d_path(ab, " name=", &context->pwd);
1540 break;
1541 default:
1542 /* log the name's directory component */
1543 audit_log_format(ab, " name=");
1544 audit_log_n_untrustedstring(ab, n->name->name,
1545 n->name_len);
1546 }
1547 } else
1548 audit_log_format(ab, " name=(null)");
1549
1550 if (n->ino != (unsigned long)-1) {
1551 audit_log_format(ab, " inode=%lu"
1552 " dev=%02x:%02x mode=%#ho"
1553 " ouid=%u ogid=%u rdev=%02x:%02x",
1554 n->ino,
1555 MAJOR(n->dev),
1556 MINOR(n->dev),
1557 n->mode,
1558 from_kuid(&init_user_ns, n->uid),
1559 from_kgid(&init_user_ns, n->gid),
1560 MAJOR(n->rdev),
1561 MINOR(n->rdev));
1562 }
1563 if (n->osid != 0) {
1564 char *ctx = NULL;
1565 u32 len;
1566 if (security_secid_to_secctx(
1567 n->osid, &ctx, &len)) {
1568 audit_log_format(ab, " osid=%u", n->osid);
1569 *call_panic = 2;
1570 } else {
1571 audit_log_format(ab, " obj=%s", ctx);
1572 security_release_secctx(ctx, len);
1573 }
1574 }
1575
1576 audit_log_fcaps(ab, n);
1577
1578 audit_log_end(ab);
1579}
1580
1581static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) 1286static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
1582{ 1287{
1583 int i, call_panic = 0; 1288 int i, call_panic = 0;
@@ -1695,7 +1400,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
1695 1400
1696 i = 0; 1401 i = 0;
1697 list_for_each_entry(n, &context->names_list, list) 1402 list_for_each_entry(n, &context->names_list, list)
1698 audit_log_name(context, n, i++, &call_panic); 1403 audit_log_name(context, n, NULL, i++, &call_panic);
1699 1404
1700 /* Send end of event record to help user space know we are finished */ 1405 /* Send end of event record to help user space know we are finished */
1701 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); 1406 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -2030,18 +1735,18 @@ void audit_putname(struct filename *name)
2030 BUG_ON(!context); 1735 BUG_ON(!context);
2031 if (!context->in_syscall) { 1736 if (!context->in_syscall) {
2032#if AUDIT_DEBUG == 2 1737#if AUDIT_DEBUG == 2
2033 printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n", 1738 printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
2034 __FILE__, __LINE__, context->serial, name); 1739 __FILE__, __LINE__, context->serial, name);
2035 if (context->name_count) { 1740 if (context->name_count) {
2036 struct audit_names *n; 1741 struct audit_names *n;
2037 int i; 1742 int i = 0;
2038 1743
2039 list_for_each_entry(n, &context->names_list, list) 1744 list_for_each_entry(n, &context->names_list, list)
2040 printk(KERN_ERR "name[%d] = %p = %s\n", i, 1745 printk(KERN_ERR "name[%d] = %p = %s\n", i++,
2041 n->name, n->name->name ?: "(null)"); 1746 n->name, n->name->name ?: "(null)");
2042 } 1747 }
2043#endif 1748#endif
2044 __putname(name); 1749 final_putname(name);
2045 } 1750 }
2046#if AUDIT_DEBUG 1751#if AUDIT_DEBUG
2047 else { 1752 else {
@@ -2060,41 +1765,6 @@ void audit_putname(struct filename *name)
2060#endif 1765#endif
2061} 1766}
2062 1767
2063static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
2064{
2065 struct cpu_vfs_cap_data caps;
2066 int rc;
2067
2068 if (!dentry)
2069 return 0;
2070
2071 rc = get_vfs_caps_from_disk(dentry, &caps);
2072 if (rc)
2073 return rc;
2074
2075 name->fcap.permitted = caps.permitted;
2076 name->fcap.inheritable = caps.inheritable;
2077 name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
2078 name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
2079
2080 return 0;
2081}
2082
2083
2084/* Copy inode data into an audit_names. */
2085static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
2086 const struct inode *inode)
2087{
2088 name->ino = inode->i_ino;
2089 name->dev = inode->i_sb->s_dev;
2090 name->mode = inode->i_mode;
2091 name->uid = inode->i_uid;
2092 name->gid = inode->i_gid;
2093 name->rdev = inode->i_rdev;
2094 security_inode_getsecid(inode, &name->osid);
2095 audit_copy_fcaps(name, dentry);
2096}
2097
2098/** 1768/**
2099 * __audit_inode - store the inode and device from a lookup 1769 * __audit_inode - store the inode and device from a lookup
2100 * @name: name being audited 1770 * @name: name being audited
@@ -2303,7 +1973,7 @@ int audit_set_loginuid(kuid_t loginuid)
2303 unsigned int sessionid; 1973 unsigned int sessionid;
2304 1974
2305#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE 1975#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
2306 if (uid_valid(task->loginuid)) 1976 if (audit_loginuid_set(task))
2307 return -EPERM; 1977 return -EPERM;
2308#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */ 1978#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
2309 if (!capable(CAP_AUDIT_CONTROL)) 1979 if (!capable(CAP_AUDIT_CONTROL))
@@ -2471,17 +2141,20 @@ int __audit_bprm(struct linux_binprm *bprm)
2471 2141
2472/** 2142/**
2473 * audit_socketcall - record audit data for sys_socketcall 2143 * audit_socketcall - record audit data for sys_socketcall
2474 * @nargs: number of args 2144 * @nargs: number of args, which should not be more than AUDITSC_ARGS.
2475 * @args: args array 2145 * @args: args array
2476 * 2146 *
2477 */ 2147 */
2478void __audit_socketcall(int nargs, unsigned long *args) 2148int __audit_socketcall(int nargs, unsigned long *args)
2479{ 2149{
2480 struct audit_context *context = current->audit_context; 2150 struct audit_context *context = current->audit_context;
2481 2151
2152 if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
2153 return -EINVAL;
2482 context->type = AUDIT_SOCKETCALL; 2154 context->type = AUDIT_SOCKETCALL;
2483 context->socketcall.nargs = nargs; 2155 context->socketcall.nargs = nargs;
2484 memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); 2156 memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
2157 return 0;
2485} 2158}
2486 2159
2487/** 2160/**
diff --git a/kernel/params.c b/kernel/params.c
index ed35345be536..53b958fcd639 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -613,10 +613,13 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
613 sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1), 613 sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
614 GFP_KERNEL); 614 GFP_KERNEL);
615 if (!new) { 615 if (!new) {
616 kfree(mk->mp); 616 kfree(attrs);
617 err = -ENOMEM; 617 err = -ENOMEM;
618 goto fail; 618 goto fail;
619 } 619 }
620 /* Despite looking like the typical realloc() bug, this is safe.
621 * We *want* the old 'attrs' to be freed either way, and we'll store
622 * the new one in the success case. */
620 attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL); 623 attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
621 if (!attrs) { 624 if (!attrs) {
622 err = -ENOMEM; 625 err = -ENOMEM;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index bfd6787b355a..7078052284fd 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -200,6 +200,7 @@ cond_syscall(sys_perf_event_open);
200/* fanotify! */ 200/* fanotify! */
201cond_syscall(sys_fanotify_init); 201cond_syscall(sys_fanotify_init);
202cond_syscall(sys_fanotify_mark); 202cond_syscall(sys_fanotify_mark);
203cond_syscall(compat_sys_fanotify_mark);
203 204
204/* open by handle */ 205/* open by handle */
205cond_syscall(sys_name_to_handle_at); 206cond_syscall(sys_name_to_handle_at);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index ebf72358e86a..aea4a9ea6fc8 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -15,6 +15,7 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/compat.h>
18 19
19#ifdef CONFIG_SYSCTL_SYSCALL 20#ifdef CONFIG_SYSCTL_SYSCALL
20 21
@@ -1447,7 +1448,6 @@ SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
1447 1448
1448 1449
1449#ifdef CONFIG_COMPAT 1450#ifdef CONFIG_COMPAT
1450#include <asm/compat.h>
1451 1451
1452struct compat_sysctl_args { 1452struct compat_sysctl_args {
1453 compat_uptr_t name; 1453 compat_uptr_t name;
@@ -1459,7 +1459,7 @@ struct compat_sysctl_args {
1459 compat_ulong_t __unused[4]; 1459 compat_ulong_t __unused[4];
1460}; 1460};
1461 1461
1462asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args) 1462COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args)
1463{ 1463{
1464 struct compat_sysctl_args tmp; 1464 struct compat_sysctl_args tmp;
1465 compat_size_t __user *compat_oldlenp; 1465 compat_size_t __user *compat_oldlenp;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5e9efd4b83a4..015f85aaca08 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -71,6 +71,7 @@ config TRACE_CLOCK
71config RING_BUFFER 71config RING_BUFFER
72 bool 72 bool
73 select TRACE_CLOCK 73 select TRACE_CLOCK
74 select IRQ_WORK
74 75
75config FTRACE_NMI_ENTER 76config FTRACE_NMI_ENTER
76 bool 77 bool
@@ -107,7 +108,6 @@ config TRACING
107 select BINARY_PRINTF 108 select BINARY_PRINTF
108 select EVENT_TRACING 109 select EVENT_TRACING
109 select TRACE_CLOCK 110 select TRACE_CLOCK
110 select IRQ_WORK
111 111
112config GENERIC_TRACER 112config GENERIC_TRACER
113 bool 113 bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8a5c017bb50c..b549b0f5b977 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,6 +64,13 @@
64 64
65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL) 65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66 66
67#ifdef CONFIG_DYNAMIC_FTRACE
68#define INIT_REGEX_LOCK(opsname) \
69 .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
70#else
71#define INIT_REGEX_LOCK(opsname)
72#endif
73
67static struct ftrace_ops ftrace_list_end __read_mostly = { 74static struct ftrace_ops ftrace_list_end __read_mostly = {
68 .func = ftrace_stub, 75 .func = ftrace_stub,
69 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, 76 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
131 while (likely(op = rcu_dereference_raw((op)->next)) && \ 138 while (likely(op = rcu_dereference_raw((op)->next)) && \
132 unlikely((op) != &ftrace_list_end)) 139 unlikely((op) != &ftrace_list_end))
133 140
141static inline void ftrace_ops_init(struct ftrace_ops *ops)
142{
143#ifdef CONFIG_DYNAMIC_FTRACE
144 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
145 mutex_init(&ops->regex_lock);
146 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
147 }
148#endif
149}
150
134/** 151/**
135 * ftrace_nr_registered_ops - return number of ops registered 152 * ftrace_nr_registered_ops - return number of ops registered
136 * 153 *
@@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void)
907#else 924#else
908static struct ftrace_ops ftrace_profile_ops __read_mostly = { 925static struct ftrace_ops ftrace_profile_ops __read_mostly = {
909 .func = function_profile_call, 926 .func = function_profile_call,
910 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 927 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
928 INIT_REGEX_LOCK(ftrace_profile_ops)
911}; 929};
912 930
913static int register_ftrace_profiler(void) 931static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = {
1103 .func = ftrace_stub, 1121 .func = ftrace_stub,
1104 .notrace_hash = EMPTY_HASH, 1122 .notrace_hash = EMPTY_HASH,
1105 .filter_hash = EMPTY_HASH, 1123 .filter_hash = EMPTY_HASH,
1106 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 1124 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1125 INIT_REGEX_LOCK(global_ops)
1107}; 1126};
1108 1127
1109static DEFINE_MUTEX(ftrace_regex_lock);
1110
1111struct ftrace_page { 1128struct ftrace_page {
1112 struct ftrace_page *next; 1129 struct ftrace_page *next;
1113 struct dyn_ftrace *records; 1130 struct dyn_ftrace *records;
@@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1247 1264
1248void ftrace_free_filter(struct ftrace_ops *ops) 1265void ftrace_free_filter(struct ftrace_ops *ops)
1249{ 1266{
1267 ftrace_ops_init(ops);
1250 free_ftrace_hash(ops->filter_hash); 1268 free_ftrace_hash(ops->filter_hash);
1251 free_ftrace_hash(ops->notrace_hash); 1269 free_ftrace_hash(ops->notrace_hash);
1252} 1270}
@@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
2441 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || 2459 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2442 2460
2443 ((iter->flags & FTRACE_ITER_ENABLED) && 2461 ((iter->flags & FTRACE_ITER_ENABLED) &&
2444 !(rec->flags & ~FTRACE_FL_MASK))) { 2462 !(rec->flags & FTRACE_FL_ENABLED))) {
2445 2463
2446 rec = NULL; 2464 rec = NULL;
2447 goto retry; 2465 goto retry;
@@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
2624 struct ftrace_hash *hash; 2642 struct ftrace_hash *hash;
2625 int ret = 0; 2643 int ret = 0;
2626 2644
2645 ftrace_ops_init(ops);
2646
2627 if (unlikely(ftrace_disabled)) 2647 if (unlikely(ftrace_disabled))
2628 return -ENODEV; 2648 return -ENODEV;
2629 2649
@@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
2636 return -ENOMEM; 2656 return -ENOMEM;
2637 } 2657 }
2638 2658
2659 iter->ops = ops;
2660 iter->flags = flag;
2661
2662 mutex_lock(&ops->regex_lock);
2663
2639 if (flag & FTRACE_ITER_NOTRACE) 2664 if (flag & FTRACE_ITER_NOTRACE)
2640 hash = ops->notrace_hash; 2665 hash = ops->notrace_hash;
2641 else 2666 else
2642 hash = ops->filter_hash; 2667 hash = ops->filter_hash;
2643 2668
2644 iter->ops = ops;
2645 iter->flags = flag;
2646
2647 if (file->f_mode & FMODE_WRITE) { 2669 if (file->f_mode & FMODE_WRITE) {
2648 mutex_lock(&ftrace_lock);
2649 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); 2670 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2650 mutex_unlock(&ftrace_lock);
2651
2652 if (!iter->hash) { 2671 if (!iter->hash) {
2653 trace_parser_put(&iter->parser); 2672 trace_parser_put(&iter->parser);
2654 kfree(iter); 2673 kfree(iter);
2655 return -ENOMEM; 2674 ret = -ENOMEM;
2675 goto out_unlock;
2656 } 2676 }
2657 } 2677 }
2658 2678
2659 mutex_lock(&ftrace_regex_lock);
2660
2661 if ((file->f_mode & FMODE_WRITE) && 2679 if ((file->f_mode & FMODE_WRITE) &&
2662 (file->f_flags & O_TRUNC)) 2680 (file->f_flags & O_TRUNC))
2663 ftrace_filter_reset(iter->hash); 2681 ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
2677 } 2695 }
2678 } else 2696 } else
2679 file->private_data = iter; 2697 file->private_data = iter;
2680 mutex_unlock(&ftrace_regex_lock); 2698
2699 out_unlock:
2700 mutex_unlock(&ops->regex_lock);
2681 2701
2682 return ret; 2702 return ret;
2683} 2703}
@@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
2910static struct ftrace_ops trace_probe_ops __read_mostly = 2930static struct ftrace_ops trace_probe_ops __read_mostly =
2911{ 2931{
2912 .func = function_trace_probe_call, 2932 .func = function_trace_probe_call,
2933 .flags = FTRACE_OPS_FL_INITIALIZED,
2934 INIT_REGEX_LOCK(trace_probe_ops)
2913}; 2935};
2914 2936
2915static int ftrace_probe_registered; 2937static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
2919 int ret; 2941 int ret;
2920 int i; 2942 int i;
2921 2943
2922 if (ftrace_probe_registered) 2944 if (ftrace_probe_registered) {
2945 /* still need to update the function call sites */
2946 if (ftrace_enabled)
2947 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2923 return; 2948 return;
2949 }
2924 2950
2925 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 2951 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2926 struct hlist_head *hhd = &ftrace_func_hash[i]; 2952 struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2990 if (WARN_ON(not)) 3016 if (WARN_ON(not))
2991 return -EINVAL; 3017 return -EINVAL;
2992 3018
2993 mutex_lock(&ftrace_lock); 3019 mutex_lock(&trace_probe_ops.regex_lock);
2994 3020
2995 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3021 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2996 if (!hash) { 3022 if (!hash) {
2997 count = -ENOMEM; 3023 count = -ENOMEM;
2998 goto out_unlock; 3024 goto out;
2999 } 3025 }
3000 3026
3001 if (unlikely(ftrace_disabled)) { 3027 if (unlikely(ftrace_disabled)) {
3002 count = -ENODEV; 3028 count = -ENODEV;
3003 goto out_unlock; 3029 goto out;
3004 } 3030 }
3005 3031
3032 mutex_lock(&ftrace_lock);
3033
3006 do_for_each_ftrace_rec(pg, rec) { 3034 do_for_each_ftrace_rec(pg, rec) {
3007 3035
3008 if (!ftrace_match_record(rec, NULL, search, len, type)) 3036 if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3056 3084
3057 out_unlock: 3085 out_unlock:
3058 mutex_unlock(&ftrace_lock); 3086 mutex_unlock(&ftrace_lock);
3087 out:
3088 mutex_unlock(&trace_probe_ops.regex_lock);
3059 free_ftrace_hash(hash); 3089 free_ftrace_hash(hash);
3060 3090
3061 return count; 3091 return count;
@@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3095 return; 3125 return;
3096 } 3126 }
3097 3127
3098 mutex_lock(&ftrace_lock); 3128 mutex_lock(&trace_probe_ops.regex_lock);
3099 3129
3100 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3130 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3101 if (!hash) 3131 if (!hash)
@@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3133 list_add(&entry->free_list, &free_list); 3163 list_add(&entry->free_list, &free_list);
3134 } 3164 }
3135 } 3165 }
3166 mutex_lock(&ftrace_lock);
3136 __disable_ftrace_function_probe(); 3167 __disable_ftrace_function_probe();
3137 /* 3168 /*
3138 * Remove after the disable is called. Otherwise, if the last 3169 * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3144 list_del(&entry->free_list); 3175 list_del(&entry->free_list);
3145 ftrace_free_entry(entry); 3176 ftrace_free_entry(entry);
3146 } 3177 }
3178 mutex_unlock(&ftrace_lock);
3147 3179
3148 out_unlock: 3180 out_unlock:
3149 mutex_unlock(&ftrace_lock); 3181 mutex_unlock(&trace_probe_ops.regex_lock);
3150 free_ftrace_hash(hash); 3182 free_ftrace_hash(hash);
3151} 3183}
3152 3184
@@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
3256 if (!cnt) 3288 if (!cnt)
3257 return 0; 3289 return 0;
3258 3290
3259 mutex_lock(&ftrace_regex_lock);
3260
3261 ret = -ENODEV;
3262 if (unlikely(ftrace_disabled))
3263 goto out_unlock;
3264
3265 if (file->f_mode & FMODE_READ) { 3291 if (file->f_mode & FMODE_READ) {
3266 struct seq_file *m = file->private_data; 3292 struct seq_file *m = file->private_data;
3267 iter = m->private; 3293 iter = m->private;
3268 } else 3294 } else
3269 iter = file->private_data; 3295 iter = file->private_data;
3270 3296
3297 if (unlikely(ftrace_disabled))
3298 return -ENODEV;
3299
3300 /* iter->hash is a local copy, so we don't need regex_lock */
3301
3271 parser = &iter->parser; 3302 parser = &iter->parser;
3272 read = trace_get_user(parser, ubuf, cnt, ppos); 3303 read = trace_get_user(parser, ubuf, cnt, ppos);
3273 3304
@@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
3276 ret = ftrace_process_regex(iter->hash, parser->buffer, 3307 ret = ftrace_process_regex(iter->hash, parser->buffer,
3277 parser->idx, enable); 3308 parser->idx, enable);
3278 trace_parser_clear(parser); 3309 trace_parser_clear(parser);
3279 if (ret) 3310 if (ret < 0)
3280 goto out_unlock; 3311 goto out;
3281 } 3312 }
3282 3313
3283 ret = read; 3314 ret = read;
3284out_unlock: 3315 out:
3285 mutex_unlock(&ftrace_regex_lock);
3286
3287 return ret; 3316 return ret;
3288} 3317}
3289 3318
@@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3335 if (unlikely(ftrace_disabled)) 3364 if (unlikely(ftrace_disabled))
3336 return -ENODEV; 3365 return -ENODEV;
3337 3366
3367 mutex_lock(&ops->regex_lock);
3368
3338 if (enable) 3369 if (enable)
3339 orig_hash = &ops->filter_hash; 3370 orig_hash = &ops->filter_hash;
3340 else 3371 else
3341 orig_hash = &ops->notrace_hash; 3372 orig_hash = &ops->notrace_hash;
3342 3373
3343 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3374 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3344 if (!hash) 3375 if (!hash) {
3345 return -ENOMEM; 3376 ret = -ENOMEM;
3377 goto out_regex_unlock;
3378 }
3346 3379
3347 mutex_lock(&ftrace_regex_lock);
3348 if (reset) 3380 if (reset)
3349 ftrace_filter_reset(hash); 3381 ftrace_filter_reset(hash);
3350 if (buf && !ftrace_match_records(hash, buf, len)) { 3382 if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3366 mutex_unlock(&ftrace_lock); 3398 mutex_unlock(&ftrace_lock);
3367 3399
3368 out_regex_unlock: 3400 out_regex_unlock:
3369 mutex_unlock(&ftrace_regex_lock); 3401 mutex_unlock(&ops->regex_lock);
3370 3402
3371 free_ftrace_hash(hash); 3403 free_ftrace_hash(hash);
3372 return ret; 3404 return ret;
@@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3392int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 3424int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3393 int remove, int reset) 3425 int remove, int reset)
3394{ 3426{
3427 ftrace_ops_init(ops);
3395 return ftrace_set_addr(ops, ip, remove, reset, 1); 3428 return ftrace_set_addr(ops, ip, remove, reset, 1);
3396} 3429}
3397EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); 3430EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3416int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 3449int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3417 int len, int reset) 3450 int len, int reset)
3418{ 3451{
3452 ftrace_ops_init(ops);
3419 return ftrace_set_regex(ops, buf, len, reset, 1); 3453 return ftrace_set_regex(ops, buf, len, reset, 1);
3420} 3454}
3421EXPORT_SYMBOL_GPL(ftrace_set_filter); 3455EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
3434int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 3468int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3435 int len, int reset) 3469 int len, int reset)
3436{ 3470{
3471 ftrace_ops_init(ops);
3437 return ftrace_set_regex(ops, buf, len, reset, 0); 3472 return ftrace_set_regex(ops, buf, len, reset, 0);
3438} 3473}
3439EXPORT_SYMBOL_GPL(ftrace_set_notrace); 3474EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3524{ 3559{
3525 char *func; 3560 char *func;
3526 3561
3562 ftrace_ops_init(ops);
3563
3527 while (buf) { 3564 while (buf) {
3528 func = strsep(&buf, ","); 3565 func = strsep(&buf, ",");
3529 ftrace_set_regex(ops, func, strlen(func), 0, enable); 3566 ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
3551 int filter_hash; 3588 int filter_hash;
3552 int ret; 3589 int ret;
3553 3590
3554 mutex_lock(&ftrace_regex_lock);
3555 if (file->f_mode & FMODE_READ) { 3591 if (file->f_mode & FMODE_READ) {
3556 iter = m->private; 3592 iter = m->private;
3557
3558 seq_release(inode, file); 3593 seq_release(inode, file);
3559 } else 3594 } else
3560 iter = file->private_data; 3595 iter = file->private_data;
@@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
3567 3602
3568 trace_parser_put(parser); 3603 trace_parser_put(parser);
3569 3604
3605 mutex_lock(&iter->ops->regex_lock);
3606
3570 if (file->f_mode & FMODE_WRITE) { 3607 if (file->f_mode & FMODE_WRITE) {
3571 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 3608 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3572 3609
@@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
3584 3621
3585 mutex_unlock(&ftrace_lock); 3622 mutex_unlock(&ftrace_lock);
3586 } 3623 }
3624
3625 mutex_unlock(&iter->ops->regex_lock);
3587 free_ftrace_hash(iter->hash); 3626 free_ftrace_hash(iter->hash);
3588 kfree(iter); 3627 kfree(iter);
3589 3628
3590 mutex_unlock(&ftrace_regex_lock);
3591 return 0; 3629 return 0;
3592} 3630}
3593 3631
@@ -4126,7 +4164,8 @@ void __init ftrace_init(void)
4126 4164
4127static struct ftrace_ops global_ops = { 4165static struct ftrace_ops global_ops = {
4128 .func = ftrace_stub, 4166 .func = ftrace_stub,
4129 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 4167 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4168 INIT_REGEX_LOCK(global_ops)
4130}; 4169};
4131 4170
4132static int __init ftrace_nodyn_init(void) 4171static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4180} 4219}
4181 4220
4182static struct ftrace_ops control_ops = { 4221static struct ftrace_ops control_ops = {
4183 .func = ftrace_ops_control_func, 4222 .func = ftrace_ops_control_func,
4184 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 4223 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4224 INIT_REGEX_LOCK(control_ops)
4185}; 4225};
4186 4226
4187static inline void 4227static inline void
@@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
4539{ 4579{
4540 int ret = -1; 4580 int ret = -1;
4541 4581
4582 ftrace_ops_init(ops);
4583
4542 mutex_lock(&ftrace_lock); 4584 mutex_lock(&ftrace_lock);
4543 4585
4544 ret = __register_ftrace_function(ops); 4586 ret = __register_ftrace_function(ops);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53582e982e51..7a0cf68027cc 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
251 switch (enable) { 251 switch (enable) {
252 case 0: 252 case 0:
253 /* 253 /*
254 * When soft_disable is set and enable is cleared, we want 254 * When soft_disable is set and enable is cleared, the sm_ref
255 * reference counter is decremented. If it reaches 0, we want
255 * to clear the SOFT_DISABLED flag but leave the event in the 256 * to clear the SOFT_DISABLED flag but leave the event in the
256 * state that it was. That is, if the event was enabled and 257 * state that it was. That is, if the event was enabled and
257 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED 258 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
263 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 264 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
264 */ 265 */
265 if (soft_disable) { 266 if (soft_disable) {
267 if (atomic_dec_return(&file->sm_ref) > 0)
268 break;
266 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; 269 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
267 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 270 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
268 } else 271 } else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
291 */ 294 */
292 if (!soft_disable) 295 if (!soft_disable)
293 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 296 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
294 else 297 else {
298 if (atomic_inc_return(&file->sm_ref) > 1)
299 break;
295 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 300 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
301 }
296 302
297 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { 303 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
298 304
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
623 if (file->flags & FTRACE_EVENT_FL_ENABLED) { 629 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
624 if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED) 630 if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
625 buf = "0*\n"; 631 buf = "0*\n";
632 else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
633 buf = "1*\n";
626 else 634 else
627 buf = "1\n"; 635 buf = "1\n";
628 } else 636 } else
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
1521 return 0; 1529 return 0;
1522} 1530}
1523 1531
1532static struct ftrace_event_file *
1533trace_create_new_event(struct ftrace_event_call *call,
1534 struct trace_array *tr)
1535{
1536 struct ftrace_event_file *file;
1537
1538 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1539 if (!file)
1540 return NULL;
1541
1542 file->event_call = call;
1543 file->tr = tr;
1544 atomic_set(&file->sm_ref, 0);
1545 list_add(&file->list, &tr->events);
1546
1547 return file;
1548}
1549
1524/* Add an event to a trace directory */ 1550/* Add an event to a trace directory */
1525static int 1551static int
1526__trace_add_new_event(struct ftrace_event_call *call, 1552__trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
1532{ 1558{
1533 struct ftrace_event_file *file; 1559 struct ftrace_event_file *file;
1534 1560
1535 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 1561 file = trace_create_new_event(call, tr);
1536 if (!file) 1562 if (!file)
1537 return -ENOMEM; 1563 return -ENOMEM;
1538 1564
1539 file->event_call = call;
1540 file->tr = tr;
1541 list_add(&file->list, &tr->events);
1542
1543 return event_create_dir(tr->event_dir, file, id, enable, filter, format); 1565 return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1544} 1566}
1545 1567
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
1554{ 1576{
1555 struct ftrace_event_file *file; 1577 struct ftrace_event_file *file;
1556 1578
1557 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 1579 file = trace_create_new_event(call, tr);
1558 if (!file) 1580 if (!file)
1559 return -ENOMEM; 1581 return -ENOMEM;
1560 1582
1561 file->event_call = call;
1562 file->tr = tr;
1563 list_add(&file->list, &tr->events);
1564
1565 return 0; 1583 return 0;
1566} 1584}
1567 1585
@@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash,
2061 if (ret < 0) 2079 if (ret < 0)
2062 goto out_put; 2080 goto out_put;
2063 ret = register_ftrace_function_probe(glob, ops, data); 2081 ret = register_ftrace_function_probe(glob, ops, data);
2064 if (!ret) 2082 /*
2083 * The above returns on success the # of functions enabled,
2084 * but if it didn't find any functions it returns zero.
2085 * Consider no functions a failure too.
2086 */
2087 if (!ret) {
2088 ret = -ENOENT;
2089 goto out_disable;
2090 } else if (ret < 0)
2065 goto out_disable; 2091 goto out_disable;
2092 /* Just return zero, not the number of enabled functions */
2093 ret = 0;
2066 out: 2094 out:
2067 mutex_unlock(&event_mutex); 2095 mutex_unlock(&event_mutex);
2068 return ret; 2096 return ret;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1865d5f76538..636d45fe69b3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -27,7 +27,6 @@
27/** 27/**
28 * Kprobe event core functions 28 * Kprobe event core functions
29 */ 29 */
30
31struct trace_probe { 30struct trace_probe {
32 struct list_head list; 31 struct list_head list;
33 struct kretprobe rp; /* Use rp.kp for kprobe use */ 32 struct kretprobe rp; /* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@ struct trace_probe {
36 const char *symbol; /* symbol name */ 35 const char *symbol; /* symbol name */
37 struct ftrace_event_class class; 36 struct ftrace_event_class class;
38 struct ftrace_event_call call; 37 struct ftrace_event_call call;
38 struct ftrace_event_file **files;
39 ssize_t size; /* trace entry size */ 39 ssize_t size; /* trace entry size */
40 unsigned int nr_args; 40 unsigned int nr_args;
41 struct probe_arg args[]; 41 struct probe_arg args[];
@@ -46,7 +46,7 @@ struct trace_probe {
46 (sizeof(struct probe_arg) * (n))) 46 (sizeof(struct probe_arg) * (n)))
47 47
48 48
49static __kprobes int trace_probe_is_return(struct trace_probe *tp) 49static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
50{ 50{
51 return tp->rp.handler != NULL; 51 return tp->rp.handler != NULL;
52} 52}
@@ -183,12 +183,57 @@ static struct trace_probe *find_trace_probe(const char *event,
183 return NULL; 183 return NULL;
184} 184}
185 185
186/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */ 186static int trace_probe_nr_files(struct trace_probe *tp)
187static int enable_trace_probe(struct trace_probe *tp, int flag) 187{
188 struct ftrace_event_file **file = tp->files;
189 int ret = 0;
190
191 if (file)
192 while (*(file++))
193 ret++;
194
195 return ret;
196}
197
198static DEFINE_MUTEX(probe_enable_lock);
199
200/*
201 * Enable trace_probe
202 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
203 */
204static int
205enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
188{ 206{
189 int ret = 0; 207 int ret = 0;
190 208
191 tp->flags |= flag; 209 mutex_lock(&probe_enable_lock);
210
211 if (file) {
212 struct ftrace_event_file **new, **old = tp->files;
213 int n = trace_probe_nr_files(tp);
214
215 /* 1 is for new one and 1 is for stopper */
216 new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
217 GFP_KERNEL);
218 if (!new) {
219 ret = -ENOMEM;
220 goto out_unlock;
221 }
222 memcpy(new, old, n * sizeof(struct ftrace_event_file *));
223 new[n] = file;
224 /* The last one keeps a NULL */
225
226 rcu_assign_pointer(tp->files, new);
227 tp->flags |= TP_FLAG_TRACE;
228
229 if (old) {
230 /* Make sure the probe is done with old files */
231 synchronize_sched();
232 kfree(old);
233 }
234 } else
235 tp->flags |= TP_FLAG_PROFILE;
236
192 if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) && 237 if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
193 !trace_probe_has_gone(tp)) { 238 !trace_probe_has_gone(tp)) {
194 if (trace_probe_is_return(tp)) 239 if (trace_probe_is_return(tp))
@@ -197,19 +242,83 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
197 ret = enable_kprobe(&tp->rp.kp); 242 ret = enable_kprobe(&tp->rp.kp);
198 } 243 }
199 244
245 out_unlock:
246 mutex_unlock(&probe_enable_lock);
247
200 return ret; 248 return ret;
201} 249}
202 250
203/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */ 251static int
204static void disable_trace_probe(struct trace_probe *tp, int flag) 252trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
253{
254 int i;
255
256 if (tp->files) {
257 for (i = 0; tp->files[i]; i++)
258 if (tp->files[i] == file)
259 return i;
260 }
261
262 return -1;
263}
264
265/*
266 * Disable trace_probe
267 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
268 */
269static int
270disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
205{ 271{
206 tp->flags &= ~flag; 272 int ret = 0;
273
274 mutex_lock(&probe_enable_lock);
275
276 if (file) {
277 struct ftrace_event_file **new, **old = tp->files;
278 int n = trace_probe_nr_files(tp);
279 int i, j;
280
281 if (n == 0 || trace_probe_file_index(tp, file) < 0) {
282 ret = -EINVAL;
283 goto out_unlock;
284 }
285
286 if (n == 1) { /* Remove the last file */
287 tp->flags &= ~TP_FLAG_TRACE;
288 new = NULL;
289 } else {
290 new = kzalloc(n * sizeof(struct ftrace_event_file *),
291 GFP_KERNEL);
292 if (!new) {
293 ret = -ENOMEM;
294 goto out_unlock;
295 }
296
297 /* This copy & check loop copies the NULL stopper too */
298 for (i = 0, j = 0; j < n && i < n + 1; i++)
299 if (old[i] != file)
300 new[j++] = old[i];
301 }
302
303 rcu_assign_pointer(tp->files, new);
304
305 /* Make sure the probe is done with old files */
306 synchronize_sched();
307 kfree(old);
308 } else
309 tp->flags &= ~TP_FLAG_PROFILE;
310
207 if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) { 311 if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
208 if (trace_probe_is_return(tp)) 312 if (trace_probe_is_return(tp))
209 disable_kretprobe(&tp->rp); 313 disable_kretprobe(&tp->rp);
210 else 314 else
211 disable_kprobe(&tp->rp.kp); 315 disable_kprobe(&tp->rp.kp);
212 } 316 }
317
318 out_unlock:
319 mutex_unlock(&probe_enable_lock);
320
321 return ret;
213} 322}
214 323
215/* Internal register function - just handle k*probes and flags */ 324/* Internal register function - just handle k*probes and flags */
@@ -723,9 +832,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
723} 832}
724 833
725/* Kprobe handler */ 834/* Kprobe handler */
726static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) 835static __kprobes void
836__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
837 struct ftrace_event_file *ftrace_file)
727{ 838{
728 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
729 struct kprobe_trace_entry_head *entry; 839 struct kprobe_trace_entry_head *entry;
730 struct ring_buffer_event *event; 840 struct ring_buffer_event *event;
731 struct ring_buffer *buffer; 841 struct ring_buffer *buffer;
@@ -733,7 +843,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
733 unsigned long irq_flags; 843 unsigned long irq_flags;
734 struct ftrace_event_call *call = &tp->call; 844 struct ftrace_event_call *call = &tp->call;
735 845
736 tp->nhit++; 846 WARN_ON(call != ftrace_file->event_call);
847
848 if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
849 return;
737 850
738 local_save_flags(irq_flags); 851 local_save_flags(irq_flags);
739 pc = preempt_count(); 852 pc = preempt_count();
@@ -741,13 +854,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
741 dsize = __get_data_size(tp, regs); 854 dsize = __get_data_size(tp, regs);
742 size = sizeof(*entry) + tp->size + dsize; 855 size = sizeof(*entry) + tp->size + dsize;
743 856
744 event = trace_current_buffer_lock_reserve(&buffer, call->event.type, 857 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
745 size, irq_flags, pc); 858 call->event.type,
859 size, irq_flags, pc);
746 if (!event) 860 if (!event)
747 return; 861 return;
748 862
749 entry = ring_buffer_event_data(event); 863 entry = ring_buffer_event_data(event);
750 entry->ip = (unsigned long)kp->addr; 864 entry->ip = (unsigned long)tp->rp.kp.addr;
751 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 865 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
752 866
753 if (!filter_current_check_discard(buffer, call, entry, event)) 867 if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +869,24 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
755 irq_flags, pc, regs); 869 irq_flags, pc, regs);
756} 870}
757 871
872static __kprobes void
873kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
874{
875 struct ftrace_event_file **file = tp->files;
876
877 /* Note: preempt is already disabled around the kprobe handler */
878 while (*file) {
879 __kprobe_trace_func(tp, regs, *file);
880 file++;
881 }
882}
883
758/* Kretprobe handler */ 884/* Kretprobe handler */
759static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, 885static __kprobes void
760 struct pt_regs *regs) 886__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
887 struct pt_regs *regs,
888 struct ftrace_event_file *ftrace_file)
761{ 889{
762 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
763 struct kretprobe_trace_entry_head *entry; 890 struct kretprobe_trace_entry_head *entry;
764 struct ring_buffer_event *event; 891 struct ring_buffer_event *event;
765 struct ring_buffer *buffer; 892 struct ring_buffer *buffer;
@@ -767,14 +894,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
767 unsigned long irq_flags; 894 unsigned long irq_flags;
768 struct ftrace_event_call *call = &tp->call; 895 struct ftrace_event_call *call = &tp->call;
769 896
897 WARN_ON(call != ftrace_file->event_call);
898
899 if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
900 return;
901
770 local_save_flags(irq_flags); 902 local_save_flags(irq_flags);
771 pc = preempt_count(); 903 pc = preempt_count();
772 904
773 dsize = __get_data_size(tp, regs); 905 dsize = __get_data_size(tp, regs);
774 size = sizeof(*entry) + tp->size + dsize; 906 size = sizeof(*entry) + tp->size + dsize;
775 907
776 event = trace_current_buffer_lock_reserve(&buffer, call->event.type, 908 event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
777 size, irq_flags, pc); 909 call->event.type,
910 size, irq_flags, pc);
778 if (!event) 911 if (!event)
779 return; 912 return;
780 913
@@ -788,6 +921,19 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
788 irq_flags, pc, regs); 921 irq_flags, pc, regs);
789} 922}
790 923
924static __kprobes void
925kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
926 struct pt_regs *regs)
927{
928 struct ftrace_event_file **file = tp->files;
929
930 /* Note: preempt is already disabled around the kprobe handler */
931 while (*file) {
932 __kretprobe_trace_func(tp, ri, regs, *file);
933 file++;
934 }
935}
936
791/* Event entry printers */ 937/* Event entry printers */
792enum print_line_t 938enum print_line_t
793print_kprobe_event(struct trace_iterator *iter, int flags, 939print_kprobe_event(struct trace_iterator *iter, int flags,
@@ -975,10 +1121,9 @@ static int set_print_fmt(struct trace_probe *tp)
975#ifdef CONFIG_PERF_EVENTS 1121#ifdef CONFIG_PERF_EVENTS
976 1122
977/* Kprobe profile handler */ 1123/* Kprobe profile handler */
978static __kprobes void kprobe_perf_func(struct kprobe *kp, 1124static __kprobes void
979 struct pt_regs *regs) 1125kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
980{ 1126{
981 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
982 struct ftrace_event_call *call = &tp->call; 1127 struct ftrace_event_call *call = &tp->call;
983 struct kprobe_trace_entry_head *entry; 1128 struct kprobe_trace_entry_head *entry;
984 struct hlist_head *head; 1129 struct hlist_head *head;
@@ -997,7 +1142,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
997 if (!entry) 1142 if (!entry)
998 return; 1143 return;
999 1144
1000 entry->ip = (unsigned long)kp->addr; 1145 entry->ip = (unsigned long)tp->rp.kp.addr;
1001 memset(&entry[1], 0, dsize); 1146 memset(&entry[1], 0, dsize);
1002 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 1147 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1003 1148
@@ -1007,10 +1152,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
1007} 1152}
1008 1153
1009/* Kretprobe profile handler */ 1154/* Kretprobe profile handler */
1010static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, 1155static __kprobes void
1011 struct pt_regs *regs) 1156kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
1157 struct pt_regs *regs)
1012{ 1158{
1013 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1014 struct ftrace_event_call *call = &tp->call; 1159 struct ftrace_event_call *call = &tp->call;
1015 struct kretprobe_trace_entry_head *entry; 1160 struct kretprobe_trace_entry_head *entry;
1016 struct hlist_head *head; 1161 struct hlist_head *head;
@@ -1044,20 +1189,19 @@ int kprobe_register(struct ftrace_event_call *event,
1044 enum trace_reg type, void *data) 1189 enum trace_reg type, void *data)
1045{ 1190{
1046 struct trace_probe *tp = (struct trace_probe *)event->data; 1191 struct trace_probe *tp = (struct trace_probe *)event->data;
1192 struct ftrace_event_file *file = data;
1047 1193
1048 switch (type) { 1194 switch (type) {
1049 case TRACE_REG_REGISTER: 1195 case TRACE_REG_REGISTER:
1050 return enable_trace_probe(tp, TP_FLAG_TRACE); 1196 return enable_trace_probe(tp, file);
1051 case TRACE_REG_UNREGISTER: 1197 case TRACE_REG_UNREGISTER:
1052 disable_trace_probe(tp, TP_FLAG_TRACE); 1198 return disable_trace_probe(tp, file);
1053 return 0;
1054 1199
1055#ifdef CONFIG_PERF_EVENTS 1200#ifdef CONFIG_PERF_EVENTS
1056 case TRACE_REG_PERF_REGISTER: 1201 case TRACE_REG_PERF_REGISTER:
1057 return enable_trace_probe(tp, TP_FLAG_PROFILE); 1202 return enable_trace_probe(tp, NULL);
1058 case TRACE_REG_PERF_UNREGISTER: 1203 case TRACE_REG_PERF_UNREGISTER:
1059 disable_trace_probe(tp, TP_FLAG_PROFILE); 1204 return disable_trace_probe(tp, NULL);
1060 return 0;
1061 case TRACE_REG_PERF_OPEN: 1205 case TRACE_REG_PERF_OPEN:
1062 case TRACE_REG_PERF_CLOSE: 1206 case TRACE_REG_PERF_CLOSE:
1063 case TRACE_REG_PERF_ADD: 1207 case TRACE_REG_PERF_ADD:
@@ -1073,11 +1217,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1073{ 1217{
1074 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1218 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1075 1219
1220 tp->nhit++;
1221
1076 if (tp->flags & TP_FLAG_TRACE) 1222 if (tp->flags & TP_FLAG_TRACE)
1077 kprobe_trace_func(kp, regs); 1223 kprobe_trace_func(tp, regs);
1078#ifdef CONFIG_PERF_EVENTS 1224#ifdef CONFIG_PERF_EVENTS
1079 if (tp->flags & TP_FLAG_PROFILE) 1225 if (tp->flags & TP_FLAG_PROFILE)
1080 kprobe_perf_func(kp, regs); 1226 kprobe_perf_func(tp, regs);
1081#endif 1227#endif
1082 return 0; /* We don't tweek kernel, so just return 0 */ 1228 return 0; /* We don't tweek kernel, so just return 0 */
1083} 1229}
@@ -1087,11 +1233,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1087{ 1233{
1088 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1234 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1089 1235
1236 tp->nhit++;
1237
1090 if (tp->flags & TP_FLAG_TRACE) 1238 if (tp->flags & TP_FLAG_TRACE)
1091 kretprobe_trace_func(ri, regs); 1239 kretprobe_trace_func(tp, ri, regs);
1092#ifdef CONFIG_PERF_EVENTS 1240#ifdef CONFIG_PERF_EVENTS
1093 if (tp->flags & TP_FLAG_PROFILE) 1241 if (tp->flags & TP_FLAG_PROFILE)
1094 kretprobe_perf_func(ri, regs); 1242 kretprobe_perf_func(tp, ri, regs);
1095#endif 1243#endif
1096 return 0; /* We don't tweek kernel, so just return 0 */ 1244 return 0; /* We don't tweek kernel, so just return 0 */
1097} 1245}
@@ -1189,11 +1337,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1189 return a1 + a2 + a3 + a4 + a5 + a6; 1337 return a1 + a2 + a3 + a4 + a5 + a6;
1190} 1338}
1191 1339
1340static struct ftrace_event_file *
1341find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
1342{
1343 struct ftrace_event_file *file;
1344
1345 list_for_each_entry(file, &tr->events, list)
1346 if (file->event_call == &tp->call)
1347 return file;
1348
1349 return NULL;
1350}
1351
1192static __init int kprobe_trace_self_tests_init(void) 1352static __init int kprobe_trace_self_tests_init(void)
1193{ 1353{
1194 int ret, warn = 0; 1354 int ret, warn = 0;
1195 int (*target)(int, int, int, int, int, int); 1355 int (*target)(int, int, int, int, int, int);
1196 struct trace_probe *tp; 1356 struct trace_probe *tp;
1357 struct ftrace_event_file *file;
1197 1358
1198 target = kprobe_trace_selftest_target; 1359 target = kprobe_trace_selftest_target;
1199 1360
@@ -1203,31 +1364,43 @@ static __init int kprobe_trace_self_tests_init(void)
1203 "$stack $stack0 +0($stack)", 1364 "$stack $stack0 +0($stack)",
1204 create_trace_probe); 1365 create_trace_probe);
1205 if (WARN_ON_ONCE(ret)) { 1366 if (WARN_ON_ONCE(ret)) {
1206 pr_warning("error on probing function entry.\n"); 1367 pr_warn("error on probing function entry.\n");
1207 warn++; 1368 warn++;
1208 } else { 1369 } else {
1209 /* Enable trace point */ 1370 /* Enable trace point */
1210 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); 1371 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1211 if (WARN_ON_ONCE(tp == NULL)) { 1372 if (WARN_ON_ONCE(tp == NULL)) {
1212 pr_warning("error on getting new probe.\n"); 1373 pr_warn("error on getting new probe.\n");
1213 warn++; 1374 warn++;
1214 } else 1375 } else {
1215 enable_trace_probe(tp, TP_FLAG_TRACE); 1376 file = find_trace_probe_file(tp, top_trace_array());
1377 if (WARN_ON_ONCE(file == NULL)) {
1378 pr_warn("error on getting probe file.\n");
1379 warn++;
1380 } else
1381 enable_trace_probe(tp, file);
1382 }
1216 } 1383 }
1217 1384
1218 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " 1385 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1219 "$retval", create_trace_probe); 1386 "$retval", create_trace_probe);
1220 if (WARN_ON_ONCE(ret)) { 1387 if (WARN_ON_ONCE(ret)) {
1221 pr_warning("error on probing function return.\n"); 1388 pr_warn("error on probing function return.\n");
1222 warn++; 1389 warn++;
1223 } else { 1390 } else {
1224 /* Enable trace point */ 1391 /* Enable trace point */
1225 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); 1392 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1226 if (WARN_ON_ONCE(tp == NULL)) { 1393 if (WARN_ON_ONCE(tp == NULL)) {
1227 pr_warning("error on getting new probe.\n"); 1394 pr_warn("error on getting 2nd new probe.\n");
1228 warn++; 1395 warn++;
1229 } else 1396 } else {
1230 enable_trace_probe(tp, TP_FLAG_TRACE); 1397 file = find_trace_probe_file(tp, top_trace_array());
1398 if (WARN_ON_ONCE(file == NULL)) {
1399 pr_warn("error on getting probe file.\n");
1400 warn++;
1401 } else
1402 enable_trace_probe(tp, file);
1403 }
1231 } 1404 }
1232 1405
1233 if (warn) 1406 if (warn)
@@ -1238,27 +1411,39 @@ static __init int kprobe_trace_self_tests_init(void)
1238 /* Disable trace points before removing it */ 1411 /* Disable trace points before removing it */
1239 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); 1412 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1240 if (WARN_ON_ONCE(tp == NULL)) { 1413 if (WARN_ON_ONCE(tp == NULL)) {
1241 pr_warning("error on getting test probe.\n"); 1414 pr_warn("error on getting test probe.\n");
1242 warn++; 1415 warn++;
1243 } else 1416 } else {
1244 disable_trace_probe(tp, TP_FLAG_TRACE); 1417 file = find_trace_probe_file(tp, top_trace_array());
1418 if (WARN_ON_ONCE(file == NULL)) {
1419 pr_warn("error on getting probe file.\n");
1420 warn++;
1421 } else
1422 disable_trace_probe(tp, file);
1423 }
1245 1424
1246 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); 1425 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1247 if (WARN_ON_ONCE(tp == NULL)) { 1426 if (WARN_ON_ONCE(tp == NULL)) {
1248 pr_warning("error on getting 2nd test probe.\n"); 1427 pr_warn("error on getting 2nd test probe.\n");
1249 warn++; 1428 warn++;
1250 } else 1429 } else {
1251 disable_trace_probe(tp, TP_FLAG_TRACE); 1430 file = find_trace_probe_file(tp, top_trace_array());
1431 if (WARN_ON_ONCE(file == NULL)) {
1432 pr_warn("error on getting probe file.\n");
1433 warn++;
1434 } else
1435 disable_trace_probe(tp, file);
1436 }
1252 1437
1253 ret = traceprobe_command("-:testprobe", create_trace_probe); 1438 ret = traceprobe_command("-:testprobe", create_trace_probe);
1254 if (WARN_ON_ONCE(ret)) { 1439 if (WARN_ON_ONCE(ret)) {
1255 pr_warning("error on deleting a probe.\n"); 1440 pr_warn("error on deleting a probe.\n");
1256 warn++; 1441 warn++;
1257 } 1442 }
1258 1443
1259 ret = traceprobe_command("-:testprobe2", create_trace_probe); 1444 ret = traceprobe_command("-:testprobe2", create_trace_probe);
1260 if (WARN_ON_ONCE(ret)) { 1445 if (WARN_ON_ONCE(ret)) {
1261 pr_warning("error on deleting a probe.\n"); 1446 pr_warn("error on deleting a probe.\n");
1262 warn++; 1447 warn++;
1263 } 1448 }
1264 1449
diff --git a/net/socket.c b/net/socket.c
index b416093997da..6b94633ca61d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2412,7 +2412,7 @@ static const unsigned char nargs[21] = {
2412 2412
2413SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) 2413SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2414{ 2414{
2415 unsigned long a[6]; 2415 unsigned long a[AUDITSC_ARGS];
2416 unsigned long a0, a1; 2416 unsigned long a0, a1;
2417 int err; 2417 int err;
2418 unsigned int len; 2418 unsigned int len;
@@ -2428,7 +2428,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2428 if (copy_from_user(a, args, len)) 2428 if (copy_from_user(a, args, len))
2429 return -EFAULT; 2429 return -EFAULT;
2430 2430
2431 audit_socketcall(nargs[call] / sizeof(unsigned long), a); 2431 err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
2432 if (err)
2433 return err;
2432 2434
2433 a0 = a[0]; 2435 a0 = a[0];
2434 a1 = a[1]; 2436 a1 = a[1];
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 5c4c61d527e2..357f613df7ff 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -21,16 +21,6 @@
21#include <linux/sunrpc/svcauth.h> 21#include <linux/sunrpc/svcauth.h>
22#include "gss_rpc_xdr.h" 22#include "gss_rpc_xdr.h"
23 23
24static bool gssx_check_pointer(struct xdr_stream *xdr)
25{
26 __be32 *p;
27
28 p = xdr_reserve_space(xdr, 4);
29 if (unlikely(p == NULL))
30 return -ENOSPC;
31 return *p?true:false;
32}
33
34static int gssx_enc_bool(struct xdr_stream *xdr, int v) 24static int gssx_enc_bool(struct xdr_stream *xdr, int v)
35{ 25{
36 __be32 *p; 26 __be32 *p;
@@ -264,25 +254,27 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
264 if (unlikely(p == NULL)) 254 if (unlikely(p == NULL))
265 return -ENOSPC; 255 return -ENOSPC;
266 count = be32_to_cpup(p++); 256 count = be32_to_cpup(p++);
267 if (count != 0) { 257 if (!count)
268 /* we recognize only 1 currently: CREDS_VALUE */ 258 return 0;
269 oa->count = 1;
270 259
271 oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL); 260 /* we recognize only 1 currently: CREDS_VALUE */
272 if (!oa->data) 261 oa->count = 1;
273 return -ENOMEM;
274 262
275 creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); 263 oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
276 if (!creds) { 264 if (!oa->data)
277 kfree(oa->data); 265 return -ENOMEM;
278 return -ENOMEM;
279 }
280 266
281 oa->data[0].option.data = CREDS_VALUE; 267 creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
282 oa->data[0].option.len = sizeof(CREDS_VALUE); 268 if (!creds) {
283 oa->data[0].value.data = (void *)creds; 269 kfree(oa->data);
284 oa->data[0].value.len = 0; 270 return -ENOMEM;
285 } 271 }
272
273 oa->data[0].option.data = CREDS_VALUE;
274 oa->data[0].option.len = sizeof(CREDS_VALUE);
275 oa->data[0].value.data = (void *)creds;
276 oa->data[0].value.len = 0;
277
286 for (i = 0; i < count; i++) { 278 for (i = 0; i < count; i++) {
287 gssx_buffer dummy = { 0, NULL }; 279 gssx_buffer dummy = { 0, NULL };
288 u32 length; 280 u32 length;
@@ -800,6 +792,7 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
800 struct xdr_stream *xdr, 792 struct xdr_stream *xdr,
801 struct gssx_res_accept_sec_context *res) 793 struct gssx_res_accept_sec_context *res)
802{ 794{
795 u32 value_follows;
803 int err; 796 int err;
804 797
805 /* res->status */ 798 /* res->status */
@@ -808,7 +801,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
808 return err; 801 return err;
809 802
810 /* res->context_handle */ 803 /* res->context_handle */
811 if (gssx_check_pointer(xdr)) { 804 err = gssx_dec_bool(xdr, &value_follows);
805 if (err)
806 return err;
807 if (value_follows) {
812 err = gssx_dec_ctx(xdr, res->context_handle); 808 err = gssx_dec_ctx(xdr, res->context_handle);
813 if (err) 809 if (err)
814 return err; 810 return err;
@@ -817,7 +813,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
817 } 813 }
818 814
819 /* res->output_token */ 815 /* res->output_token */
820 if (gssx_check_pointer(xdr)) { 816 err = gssx_dec_bool(xdr, &value_follows);
817 if (err)
818 return err;
819 if (value_follows) {
821 err = gssx_dec_buffer(xdr, res->output_token); 820 err = gssx_dec_buffer(xdr, res->output_token);
822 if (err) 821 if (err)
823 return err; 822 return err;
@@ -826,7 +825,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
826 } 825 }
827 826
828 /* res->delegated_cred_handle */ 827 /* res->delegated_cred_handle */
829 if (gssx_check_pointer(xdr)) { 828 err = gssx_dec_bool(xdr, &value_follows);
829 if (err)
830 return err;
831 if (value_follows) {
830 /* we do not support upcall servers sending this data. */ 832 /* we do not support upcall servers sending this data. */
831 return -EINVAL; 833 return -EINVAL;
832 } 834 }
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 071ce1b5f2b4..872d59e35ee2 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -583,8 +583,6 @@ static int atmel_abdac_remove(struct platform_device *pdev)
583 free_irq(dac->irq, dac); 583 free_irq(dac->irq, dac);
584 snd_card_free(card); 584 snd_card_free(card);
585 585
586 platform_set_drvdata(pdev, NULL);
587
588 return 0; 586 return 0;
589} 587}
590 588
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6b7e2b5a72de..ae63d22c0f88 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -1199,8 +1199,6 @@ static int atmel_ac97c_remove(struct platform_device *pdev)
1199 snd_card_set_dev(card, NULL); 1199 snd_card_set_dev(card, NULL);
1200 snd_card_free(card); 1200 snd_card_free(card);
1201 1201
1202 platform_set_drvdata(pdev, NULL);
1203
1204 return 0; 1202 return 0;
1205} 1203}
1206 1204
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 7420c59444ab..2b7f6e8bdd24 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -922,7 +922,6 @@ static int hal2_remove(struct platform_device *pdev)
922 struct snd_card *card = platform_get_drvdata(pdev); 922 struct snd_card *card = platform_get_drvdata(pdev);
923 923
924 snd_card_free(card); 924 snd_card_free(card);
925 platform_set_drvdata(pdev, NULL);
926 return 0; 925 return 0;
927} 926}
928 927
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 01a03efdc8b0..cfe99ae149fe 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -963,7 +963,6 @@ static int snd_sgio2audio_remove(struct platform_device *pdev)
963 struct snd_card *card = platform_get_drvdata(pdev); 963 struct snd_card *card = platform_get_drvdata(pdev);
964 964
965 snd_card_free(card); 965 snd_card_free(card);
966 platform_set_drvdata(pdev, NULL);
967 return 0; 966 return 0;
968} 967}
969 968
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 5849b129e50d..51c4ba95a32d 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -250,6 +250,7 @@ config MSND_FIFOSIZE
250menuconfig SOUND_OSS 250menuconfig SOUND_OSS
251 tristate "OSS sound modules" 251 tristate "OSS sound modules"
252 depends on ISA_DMA_API && VIRT_TO_BUS 252 depends on ISA_DMA_API && VIRT_TO_BUS
253 depends on !ISA_DMA_SUPPORT_BROKEN
253 help 254 help
254 OSS is the Open Sound System suite of sound card drivers. They make 255 OSS is the Open Sound System suite of sound card drivers. They make
255 sound programming easier since they provide a common API. Say Y or 256 sound programming easier since they provide a common API. Say Y or
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 6f9b64700f6e..55108b5fb291 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -681,6 +681,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
681 struct hda_bus_unsolicited *unsol; 681 struct hda_bus_unsolicited *unsol;
682 unsigned int wp; 682 unsigned int wp;
683 683
684 if (!bus || !bus->workq)
685 return 0;
686
684 trace_hda_unsol_event(bus, res, res_ex); 687 trace_hda_unsol_event(bus, res, res_ex);
685 unsol = bus->unsol; 688 unsol = bus->unsol;
686 if (!unsol) 689 if (!unsol)
@@ -1580,7 +1583,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
1580 "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n", 1583 "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
1581 nid, stream_tag, channel_id, format); 1584 nid, stream_tag, channel_id, format);
1582 p = get_hda_cvt_setup(codec, nid); 1585 p = get_hda_cvt_setup(codec, nid);
1583 if (!p || p->active) 1586 if (!p)
1584 return; 1587 return;
1585 1588
1586 if (codec->pcm_format_first) 1589 if (codec->pcm_format_first)
@@ -1627,7 +1630,7 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
1627 1630
1628 snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid); 1631 snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
1629 p = get_hda_cvt_setup(codec, nid); 1632 p = get_hda_cvt_setup(codec, nid);
1630 if (p && p->active) { 1633 if (p) {
1631 /* here we just clear the active flag when do_now isn't set; 1634 /* here we just clear the active flag when do_now isn't set;
1632 * actual clean-ups will be done later in 1635 * actual clean-ups will be done later in
1633 * purify_inactive_streams() called from snd_hda_codec_prpapre() 1636 * purify_inactive_streams() called from snd_hda_codec_prpapre()
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7b213d589ef6..de18722c4873 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -615,7 +615,7 @@ enum {
615/* quirks for Nvidia */ 615/* quirks for Nvidia */
616#define AZX_DCAPS_PRESET_NVIDIA \ 616#define AZX_DCAPS_PRESET_NVIDIA \
617 (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\ 617 (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
618 AZX_DCAPS_ALIGN_BUFSIZE) 618 AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
619 619
620#define AZX_DCAPS_PRESET_CTHDA \ 620#define AZX_DCAPS_PRESET_CTHDA \
621 (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY) 621 (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 84b81c874a4a..b314d3e6d7fa 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -64,6 +64,7 @@ struct conexant_spec {
64 /* extra EAPD pins */ 64 /* extra EAPD pins */
65 unsigned int num_eapds; 65 unsigned int num_eapds;
66 hda_nid_t eapds[4]; 66 hda_nid_t eapds[4];
67 bool dynamic_eapd;
67 68
68#ifdef ENABLE_CXT_STATIC_QUIRKS 69#ifdef ENABLE_CXT_STATIC_QUIRKS
69 const struct snd_kcontrol_new *mixers[5]; 70 const struct snd_kcontrol_new *mixers[5];
@@ -3155,7 +3156,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec)
3155 * thus it might control over all pins. 3156 * thus it might control over all pins.
3156 */ 3157 */
3157 if (spec->num_eapds > 2) 3158 if (spec->num_eapds > 2)
3158 spec->gen.own_eapd_ctl = 1; 3159 spec->dynamic_eapd = 1;
3159} 3160}
3160 3161
3161static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins, 3162static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
@@ -3194,10 +3195,19 @@ static int cx_auto_build_controls(struct hda_codec *codec)
3194 return 0; 3195 return 0;
3195} 3196}
3196 3197
3198static int cx_auto_init(struct hda_codec *codec)
3199{
3200 struct conexant_spec *spec = codec->spec;
3201 snd_hda_gen_init(codec);
3202 if (!spec->dynamic_eapd)
3203 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
3204 return 0;
3205}
3206
3197static const struct hda_codec_ops cx_auto_patch_ops = { 3207static const struct hda_codec_ops cx_auto_patch_ops = {
3198 .build_controls = cx_auto_build_controls, 3208 .build_controls = cx_auto_build_controls,
3199 .build_pcms = snd_hda_gen_build_pcms, 3209 .build_pcms = snd_hda_gen_build_pcms,
3200 .init = snd_hda_gen_init, 3210 .init = cx_auto_init,
3201 .free = snd_hda_gen_free, 3211 .free = snd_hda_gen_free,
3202 .unsol_event = snd_hda_jack_unsol_event, 3212 .unsol_event = snd_hda_jack_unsol_event,
3203#ifdef CONFIG_PM 3213#ifdef CONFIG_PM
@@ -3348,7 +3358,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
3348 3358
3349 cx_auto_parse_beep(codec); 3359 cx_auto_parse_beep(codec);
3350 cx_auto_parse_eapd(codec); 3360 cx_auto_parse_eapd(codec);
3351 if (spec->gen.own_eapd_ctl) 3361 spec->gen.own_eapd_ctl = 1;
3362 if (spec->dynamic_eapd)
3352 spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook; 3363 spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
3353 3364
3354 switch (codec->vendor_id) { 3365 switch (codec->vendor_id) {
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 32930e668854..e12f7a030c58 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1832,12 +1832,10 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
1832#define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and convertors */ 1832#define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and convertors */
1833 1833
1834static void intel_haswell_enable_all_pins(struct hda_codec *codec, 1834static void intel_haswell_enable_all_pins(struct hda_codec *codec,
1835 const struct hda_fixup *fix, int action) 1835 bool update_tree)
1836{ 1836{
1837 unsigned int vendor_param; 1837 unsigned int vendor_param;
1838 1838
1839 if (action != HDA_FIXUP_ACT_PRE_PROBE)
1840 return;
1841 vendor_param = snd_hda_codec_read(codec, INTEL_VENDOR_NID, 0, 1839 vendor_param = snd_hda_codec_read(codec, INTEL_VENDOR_NID, 0,
1842 INTEL_GET_VENDOR_VERB, 0); 1840 INTEL_GET_VENDOR_VERB, 0);
1843 if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS) 1841 if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
@@ -1849,8 +1847,8 @@ static void intel_haswell_enable_all_pins(struct hda_codec *codec,
1849 if (vendor_param == -1) 1847 if (vendor_param == -1)
1850 return; 1848 return;
1851 1849
1852 snd_hda_codec_update_widgets(codec); 1850 if (update_tree)
1853 return; 1851 snd_hda_codec_update_widgets(codec);
1854} 1852}
1855 1853
1856static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec) 1854static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
@@ -1868,30 +1866,20 @@ static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
1868 INTEL_SET_VENDOR_VERB, vendor_param); 1866 INTEL_SET_VENDOR_VERB, vendor_param);
1869} 1867}
1870 1868
1869/* Haswell needs to re-issue the vendor-specific verbs before turning to D0.
1870 * Otherwise you may get severe h/w communication errors.
1871 */
1872static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
1873 unsigned int power_state)
1874{
1875 if (power_state == AC_PWRST_D0) {
1876 intel_haswell_enable_all_pins(codec, false);
1877 intel_haswell_fixup_enable_dp12(codec);
1878 }
1871 1879
1872 1880 snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, power_state);
1873/* available models for fixup */ 1881 snd_hda_codec_set_power_to_all(codec, fg, power_state);
1874enum { 1882}
1875 INTEL_HASWELL,
1876};
1877
1878static const struct hda_model_fixup hdmi_models[] = {
1879 {.id = INTEL_HASWELL, .name = "Haswell"},
1880 {}
1881};
1882
1883static const struct snd_pci_quirk hdmi_fixup_tbl[] = {
1884 SND_PCI_QUIRK(0x8086, 0x2010, "Haswell", INTEL_HASWELL),
1885 {} /* terminator */
1886};
1887
1888static const struct hda_fixup hdmi_fixups[] = {
1889 [INTEL_HASWELL] = {
1890 .type = HDA_FIXUP_FUNC,
1891 .v.func = intel_haswell_enable_all_pins,
1892 },
1893};
1894
1895 1883
1896static int patch_generic_hdmi(struct hda_codec *codec) 1884static int patch_generic_hdmi(struct hda_codec *codec)
1897{ 1885{
@@ -1904,11 +1892,10 @@ static int patch_generic_hdmi(struct hda_codec *codec)
1904 codec->spec = spec; 1892 codec->spec = spec;
1905 hdmi_array_init(spec, 4); 1893 hdmi_array_init(spec, 4);
1906 1894
1907 snd_hda_pick_fixup(codec, hdmi_models, hdmi_fixup_tbl, hdmi_fixups); 1895 if (codec->vendor_id == 0x80862807) {
1908 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); 1896 intel_haswell_enable_all_pins(codec, true);
1909
1910 if (codec->vendor_id == 0x80862807)
1911 intel_haswell_fixup_enable_dp12(codec); 1897 intel_haswell_fixup_enable_dp12(codec);
1898 }
1912 1899
1913 if (hdmi_parse_codec(codec) < 0) { 1900 if (hdmi_parse_codec(codec) < 0) {
1914 codec->spec = NULL; 1901 codec->spec = NULL;
@@ -1916,6 +1903,9 @@ static int patch_generic_hdmi(struct hda_codec *codec)
1916 return -EINVAL; 1903 return -EINVAL;
1917 } 1904 }
1918 codec->patch_ops = generic_hdmi_patch_ops; 1905 codec->patch_ops = generic_hdmi_patch_ops;
1906 if (codec->vendor_id == 0x80862807)
1907 codec->patch_ops.set_power_state = haswell_set_power_state;
1908
1919 generic_hdmi_init_per_pins(codec); 1909 generic_hdmi_init_per_pins(codec);
1920 1910
1921 init_channel_allocations(); 1911 init_channel_allocations();
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 14094f558e03..1eb152cb1097 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2882,6 +2882,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
2882 default: 2882 default:
2883 return 0; 2883 return 0;
2884 } 2884 }
2885 break;
2885 default: 2886 default:
2886 return 0; 2887 return 0;
2887 } 2888 }
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 8b85049daab0..56ecfc72f2e9 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -505,7 +505,10 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
505 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); 505 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
506 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); 506 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
507 507
508 mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX); 508 mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
509 ACLKX | ACLKR);
510 mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
511 AFSX | AFSR);
509 break; 512 break;
510 case SND_SOC_DAIFMT_CBM_CFS: 513 case SND_SOC_DAIFMT_CBM_CFS:
511 /* codec is clock master and frame slave */ 514 /* codec is clock master and frame slave */
@@ -565,7 +568,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
565 mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL); 568 mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
566 mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL); 569 mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
567 570
568 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL); 571 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
569 mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL); 572 mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
570 break; 573 break;
571 574
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 21779a6a781a..a80c883bb8be 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1095,9 +1095,9 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
1095 1095
1096#ifdef CONFIG_HAVE_CLK 1096#ifdef CONFIG_HAVE_CLK
1097 if (SND_SOC_DAPM_EVENT_ON(event)) { 1097 if (SND_SOC_DAPM_EVENT_ON(event)) {
1098 return clk_enable(w->clk); 1098 return clk_prepare_enable(w->clk);
1099 } else { 1099 } else {
1100 clk_disable(w->clk); 1100 clk_disable_unprepare(w->clk);
1101 return 0; 1101 return 0;
1102 } 1102 }
1103#endif 1103#endif
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 321e066a0753..9e9d34871195 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -46,6 +46,7 @@ unsigned int skip_c0;
46unsigned int skip_c1; 46unsigned int skip_c1;
47unsigned int do_nhm_cstates; 47unsigned int do_nhm_cstates;
48unsigned int do_snb_cstates; 48unsigned int do_snb_cstates;
49unsigned int do_c8_c9_c10;
49unsigned int has_aperf; 50unsigned int has_aperf;
50unsigned int has_epb; 51unsigned int has_epb;
51unsigned int units = 1000000000; /* Ghz etc */ 52unsigned int units = 1000000000; /* Ghz etc */
@@ -120,6 +121,9 @@ struct pkg_data {
120 unsigned long long pc3; 121 unsigned long long pc3;
121 unsigned long long pc6; 122 unsigned long long pc6;
122 unsigned long long pc7; 123 unsigned long long pc7;
124 unsigned long long pc8;
125 unsigned long long pc9;
126 unsigned long long pc10;
123 unsigned int package_id; 127 unsigned int package_id;
124 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ 128 unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
125 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ 129 unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
@@ -282,6 +286,11 @@ void print_header(void)
282 outp += sprintf(outp, " %%pc6"); 286 outp += sprintf(outp, " %%pc6");
283 if (do_snb_cstates) 287 if (do_snb_cstates)
284 outp += sprintf(outp, " %%pc7"); 288 outp += sprintf(outp, " %%pc7");
289 if (do_c8_c9_c10) {
290 outp += sprintf(outp, " %%pc8");
291 outp += sprintf(outp, " %%pc9");
292 outp += sprintf(outp, " %%pc10");
293 }
285 294
286 if (do_rapl & RAPL_PKG) 295 if (do_rapl & RAPL_PKG)
287 outp += sprintf(outp, " Pkg_W"); 296 outp += sprintf(outp, " Pkg_W");
@@ -336,6 +345,9 @@ int dump_counters(struct thread_data *t, struct core_data *c,
336 fprintf(stderr, "pc3: %016llX\n", p->pc3); 345 fprintf(stderr, "pc3: %016llX\n", p->pc3);
337 fprintf(stderr, "pc6: %016llX\n", p->pc6); 346 fprintf(stderr, "pc6: %016llX\n", p->pc6);
338 fprintf(stderr, "pc7: %016llX\n", p->pc7); 347 fprintf(stderr, "pc7: %016llX\n", p->pc7);
348 fprintf(stderr, "pc8: %016llX\n", p->pc8);
349 fprintf(stderr, "pc9: %016llX\n", p->pc9);
350 fprintf(stderr, "pc10: %016llX\n", p->pc10);
339 fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg); 351 fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
340 fprintf(stderr, "Joules COR: %0X\n", p->energy_cores); 352 fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
341 fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx); 353 fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
@@ -493,6 +505,11 @@ int format_counters(struct thread_data *t, struct core_data *c,
493 outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc); 505 outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
494 if (do_snb_cstates) 506 if (do_snb_cstates)
495 outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc); 507 outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
508 if (do_c8_c9_c10) {
509 outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
510 outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
511 outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
512 }
496 513
497 /* 514 /*
498 * If measurement interval exceeds minimum RAPL Joule Counter range, 515 * If measurement interval exceeds minimum RAPL Joule Counter range,
@@ -569,6 +586,9 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
569 old->pc3 = new->pc3 - old->pc3; 586 old->pc3 = new->pc3 - old->pc3;
570 old->pc6 = new->pc6 - old->pc6; 587 old->pc6 = new->pc6 - old->pc6;
571 old->pc7 = new->pc7 - old->pc7; 588 old->pc7 = new->pc7 - old->pc7;
589 old->pc8 = new->pc8 - old->pc8;
590 old->pc9 = new->pc9 - old->pc9;
591 old->pc10 = new->pc10 - old->pc10;
572 old->pkg_temp_c = new->pkg_temp_c; 592 old->pkg_temp_c = new->pkg_temp_c;
573 593
574 DELTA_WRAP32(new->energy_pkg, old->energy_pkg); 594 DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
@@ -702,6 +722,9 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
702 p->pc3 = 0; 722 p->pc3 = 0;
703 p->pc6 = 0; 723 p->pc6 = 0;
704 p->pc7 = 0; 724 p->pc7 = 0;
725 p->pc8 = 0;
726 p->pc9 = 0;
727 p->pc10 = 0;
705 728
706 p->energy_pkg = 0; 729 p->energy_pkg = 0;
707 p->energy_dram = 0; 730 p->energy_dram = 0;
@@ -740,6 +763,9 @@ int sum_counters(struct thread_data *t, struct core_data *c,
740 average.packages.pc3 += p->pc3; 763 average.packages.pc3 += p->pc3;
741 average.packages.pc6 += p->pc6; 764 average.packages.pc6 += p->pc6;
742 average.packages.pc7 += p->pc7; 765 average.packages.pc7 += p->pc7;
766 average.packages.pc8 += p->pc8;
767 average.packages.pc9 += p->pc9;
768 average.packages.pc10 += p->pc10;
743 769
744 average.packages.energy_pkg += p->energy_pkg; 770 average.packages.energy_pkg += p->energy_pkg;
745 average.packages.energy_dram += p->energy_dram; 771 average.packages.energy_dram += p->energy_dram;
@@ -781,6 +807,10 @@ void compute_average(struct thread_data *t, struct core_data *c,
781 average.packages.pc3 /= topo.num_packages; 807 average.packages.pc3 /= topo.num_packages;
782 average.packages.pc6 /= topo.num_packages; 808 average.packages.pc6 /= topo.num_packages;
783 average.packages.pc7 /= topo.num_packages; 809 average.packages.pc7 /= topo.num_packages;
810
811 average.packages.pc8 /= topo.num_packages;
812 average.packages.pc9 /= topo.num_packages;
813 average.packages.pc10 /= topo.num_packages;
784} 814}
785 815
786static unsigned long long rdtsc(void) 816static unsigned long long rdtsc(void)
@@ -880,6 +910,14 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
880 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) 910 if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
881 return -12; 911 return -12;
882 } 912 }
913 if (do_c8_c9_c10) {
914 if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
915 return -13;
916 if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
917 return -13;
918 if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
919 return -13;
920 }
883 if (do_rapl & RAPL_PKG) { 921 if (do_rapl & RAPL_PKG) {
884 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) 922 if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
885 return -13; 923 return -13;
@@ -1762,6 +1800,19 @@ int is_snb(unsigned int family, unsigned int model)
1762 return 0; 1800 return 0;
1763} 1801}
1764 1802
1803int has_c8_c9_c10(unsigned int family, unsigned int model)
1804{
1805 if (!genuine_intel)
1806 return 0;
1807
1808 switch (model) {
1809 case 0x45:
1810 return 1;
1811 }
1812 return 0;
1813}
1814
1815
1765double discover_bclk(unsigned int family, unsigned int model) 1816double discover_bclk(unsigned int family, unsigned int model)
1766{ 1817{
1767 if (is_snb(family, model)) 1818 if (is_snb(family, model))
@@ -1918,6 +1969,7 @@ void check_cpuid()
1918 do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */ 1969 do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
1919 do_smi = do_nhm_cstates; 1970 do_smi = do_nhm_cstates;
1920 do_snb_cstates = is_snb(family, model); 1971 do_snb_cstates = is_snb(family, model);
1972 do_c8_c9_c10 = has_c8_c9_c10(family, model);
1921 bclk = discover_bclk(family, model); 1973 bclk = discover_bclk(family, model);
1922 1974
1923 do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); 1975 do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2279,7 +2331,7 @@ int main(int argc, char **argv)
2279 cmdline(argc, argv); 2331 cmdline(argc, argv);
2280 2332
2281 if (verbose) 2333 if (verbose)
2282 fprintf(stderr, "turbostat v3.3 March 15, 2013" 2334 fprintf(stderr, "turbostat v3.4 April 17, 2013"
2283 " - Len Brown <lenb@kernel.org>\n"); 2335 " - Len Brown <lenb@kernel.org>\n");
2284 2336
2285 turbostat_init(); 2337 turbostat_init();
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 45f09362ee7b..302681c4aa44 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1978,7 +1978,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
1978 if (vcpu->kvm->mm != current->mm) 1978 if (vcpu->kvm->mm != current->mm)
1979 return -EIO; 1979 return -EIO;
1980 1980
1981#if defined(CONFIG_S390) || defined(CONFIG_PPC) 1981#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
1982 /* 1982 /*
1983 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 1983 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1984 * so vcpu_load() would break it. 1984 * so vcpu_load() would break it.
@@ -3105,13 +3105,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3105 int r; 3105 int r;
3106 int cpu; 3106 int cpu;
3107 3107
3108 r = kvm_irqfd_init();
3109 if (r)
3110 goto out_irqfd;
3111 r = kvm_arch_init(opaque); 3108 r = kvm_arch_init(opaque);
3112 if (r) 3109 if (r)
3113 goto out_fail; 3110 goto out_fail;
3114 3111
3112 /*
3113 * kvm_arch_init makes sure there's at most one caller
3114 * for architectures that support multiple implementations,
3115 * like intel and amd on x86.
3116 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
3117 * conflicts in case kvm is already setup for another implementation.
3118 */
3119 r = kvm_irqfd_init();
3120 if (r)
3121 goto out_irqfd;
3122
3115 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3123 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
3116 r = -ENOMEM; 3124 r = -ENOMEM;
3117 goto out_free_0; 3125 goto out_free_0;
@@ -3186,10 +3194,10 @@ out_free_1:
3186out_free_0a: 3194out_free_0a:
3187 free_cpumask_var(cpus_hardware_enabled); 3195 free_cpumask_var(cpus_hardware_enabled);
3188out_free_0: 3196out_free_0:
3189 kvm_arch_exit();
3190out_fail:
3191 kvm_irqfd_exit(); 3197 kvm_irqfd_exit();
3192out_irqfd: 3198out_irqfd:
3199 kvm_arch_exit();
3200out_fail:
3193 return r; 3201 return r;
3194} 3202}
3195EXPORT_SYMBOL_GPL(kvm_init); 3203EXPORT_SYMBOL_GPL(kvm_init);